arch/mips/kernel/mips-r2-to-r6-emul.c
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2014 Imagination Technologies Ltd.
7 * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
8 * Author: Markos Chandras <markos.chandras@imgtec.com>
9 *
10 * MIPS R2 user space instruction emulator for MIPS R6
11 *
12 */
13 #include <linux/bug.h>
14 #include <linux/compiler.h>
15 #include <linux/debugfs.h>
16 #include <linux/init.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/ptrace.h>
20 #include <linux/seq_file.h>
21
22 #include <asm/asm.h>
23 #include <asm/branch.h>
24 #include <asm/break.h>
25 #include <asm/debug.h>
26 #include <asm/fpu.h>
27 #include <asm/fpu_emulator.h>
28 #include <asm/inst.h>
29 #include <asm/mips-r2-to-r6-emul.h>
30 #include <asm/local.h>
31 #include <asm/mipsregs.h>
32 #include <asm/ptrace.h>
33 #include <asm/uaccess.h>
34
35 #ifdef CONFIG_64BIT
36 #define ADDIU "daddiu "
37 #define INS "dins "
38 #define EXT "dext "
39 #else
40 #define ADDIU "addiu "
41 #define INS "ins "
42 #define EXT "ext "
43 #endif /* CONFIG_64BIT */
44
45 #define SB "sb "
46 #define LB "lb "
47 #define LL "ll "
48 #define SC "sc "
49
50 DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
51 DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
52 DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
53
54 extern const unsigned int fpucondbit[8];
55
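/*
 * Upper bound on how many consecutive R2 instructions mipsr2_decoder()
 * will emulate in a single trap before returning to userland.
 */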
56 #define MIPS_R2_EMUL_TOTAL_PASS 10
57
58 int mipsr2_emulation = 0;
59
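/* Enable the emulator when "mipsr2emu" appears on the kernel command line. */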
60 static int __init mipsr2emu_enable(char *s)
61 {
62 mipsr2_emulation = 1;
63
 64 	pr_info("MIPS R2-to-R6 Emulator Enabled!\n");
65
66 return 1;
67 }
68 __setup("mipsr2emu", mipsr2emu_enable);
69
70 /**
 71  * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in the delay slot
 72  * directly, for performance, instead of the traditional (and rather slow)
 73  * approach of using a stack trampoline.
74 * @regs: Process register set
75 * @ir: Instruction
76 */
77 static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
78 {
79 switch (MIPSInst_OPCODE(ir)) {
80 case addiu_op:
81 if (MIPSInst_RT(ir))
82 regs->regs[MIPSInst_RT(ir)] =
83 (s32)regs->regs[MIPSInst_RS(ir)] +
84 (s32)MIPSInst_SIMM(ir);
85 return 0;
86 case daddiu_op:
87 if (IS_ENABLED(CONFIG_32BIT))
88 break;
89
90 if (MIPSInst_RT(ir))
91 regs->regs[MIPSInst_RT(ir)] =
92 (s64)regs->regs[MIPSInst_RS(ir)] +
93 (s64)MIPSInst_SIMM(ir);
94 return 0;
95 case lwc1_op:
96 case swc1_op:
97 case cop1_op:
98 case cop1x_op:
99 /* FPU instructions in delay slot */
100 return -SIGFPE;
101 case spec_op:
102 switch (MIPSInst_FUNC(ir)) {
103 case or_op:
104 if (MIPSInst_RD(ir))
105 regs->regs[MIPSInst_RD(ir)] =
106 regs->regs[MIPSInst_RS(ir)] |
107 regs->regs[MIPSInst_RT(ir)];
108 return 0;
109 case sll_op:
110 if (MIPSInst_RS(ir))
111 break;
112
113 if (MIPSInst_RD(ir))
114 regs->regs[MIPSInst_RD(ir)] =
115 (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
116 MIPSInst_FD(ir));
117 return 0;
118 case srl_op:
119 if (MIPSInst_RS(ir))
120 break;
121
122 if (MIPSInst_RD(ir))
123 regs->regs[MIPSInst_RD(ir)] =
124 (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
125 MIPSInst_FD(ir));
126 return 0;
127 case addu_op:
128 if (MIPSInst_FD(ir))
129 break;
130
131 if (MIPSInst_RD(ir))
132 regs->regs[MIPSInst_RD(ir)] =
133 (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
134 (u32)regs->regs[MIPSInst_RT(ir)]);
135 return 0;
136 case subu_op:
137 if (MIPSInst_FD(ir))
138 break;
139
140 if (MIPSInst_RD(ir))
141 regs->regs[MIPSInst_RD(ir)] =
142 (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
143 (u32)regs->regs[MIPSInst_RT(ir)]);
144 return 0;
145 case dsll_op:
146 if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
147 break;
148
149 if (MIPSInst_RD(ir))
150 regs->regs[MIPSInst_RD(ir)] =
151 (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
152 MIPSInst_FD(ir));
153 return 0;
154 case dsrl_op:
155 if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
156 break;
157
158 if (MIPSInst_RD(ir))
159 regs->regs[MIPSInst_RD(ir)] =
160 (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
161 MIPSInst_FD(ir));
162 return 0;
163 case daddu_op:
164 if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
165 break;
166
167 if (MIPSInst_RD(ir))
168 regs->regs[MIPSInst_RD(ir)] =
169 (u64)regs->regs[MIPSInst_RS(ir)] +
170 (u64)regs->regs[MIPSInst_RT(ir)];
171 return 0;
172 case dsubu_op:
173 if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
174 break;
175
176 if (MIPSInst_RD(ir))
177 regs->regs[MIPSInst_RD(ir)] =
178 (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
179 (u64)regs->regs[MIPSInst_RT(ir)]);
180 return 0;
181 }
182 break;
183 default:
184 pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
185 ir, MIPSInst_OPCODE(ir));
186 }
187
188 return SIGILL;
189 }
190
191 /**
192 * movf_func - Emulate a MOVF instruction
193 * @regs: Process register set
194 * @ir: Instruction
195 *
196 * Returns 0 since it always succeeds.
197 */
198 static int movf_func(struct pt_regs *regs, u32 ir)
199 {
200 u32 csr;
201 u32 cond;
202
203 csr = current->thread.fpu.fcr31;
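	/*
	 * Bits 4:2 of the rt field hold the FP condition-code number;
	 * fpucondbit[] maps it to the matching FCSR condition bit.
	 */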
204 cond = fpucondbit[MIPSInst_RT(ir) >> 2];
205
206 if (((csr & cond) == 0) && MIPSInst_RD(ir))
207 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
208
209 MIPS_R2_STATS(movs);
210
211 return 0;
212 }
213
214 /**
215 * movt_func - Emulate a MOVT instruction
216 * @regs: Process register set
217 * @ir: Instruction
218 *
219 * Returns 0 since it always succeeds.
220 */
221 static int movt_func(struct pt_regs *regs, u32 ir)
222 {
223 u32 csr;
224 u32 cond;
225
226 csr = current->thread.fpu.fcr31;
227 cond = fpucondbit[MIPSInst_RT(ir) >> 2];
228
229 if (((csr & cond) != 0) && MIPSInst_RD(ir))
230 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
231
232 MIPS_R2_STATS(movs);
233
234 return 0;
235 }
236
237 /**
238 * jr_func - Emulate a JR instruction.
 239  * @regs: Process register set
240 * @ir: Instruction
241 *
242 * Returns SIGILL if JR was in delay slot, SIGEMT if we
243 * can't compute the EPC, SIGSEGV if we can't access the
244 * userland instruction or 0 on success.
245 */
246 static int jr_func(struct pt_regs *regs, u32 ir)
247 {
248 int err;
249 unsigned long cepc, epc, nepc;
250 u32 nir;
251
252 if (delay_slot(regs))
253 return SIGILL;
254
255 /* EPC after the RI/JR instruction */
256 nepc = regs->cp0_epc;
257 /* Roll back to the reserved R2 JR instruction */
258 regs->cp0_epc -= 4;
259 epc = regs->cp0_epc;
260 err = __compute_return_epc(regs);
261
262 if (err < 0)
263 return SIGEMT;
264
265
266 /* Computed EPC */
267 cepc = regs->cp0_epc;
268
269 /* Get DS instruction */
270 err = __get_user(nir, (u32 __user *)nepc);
271 if (err)
272 return SIGSEGV;
273
274 MIPS_R2BR_STATS(jrs);
275
 276 	/* If nir == 0 (NOP), then there is nothing else to do */
277 if (nir) {
278 /*
279 * Negative err means FPU instruction in BD-slot,
280 * Zero err means 'BD-slot emulation done'
281 * For anything else we go back to trampoline emulation.
282 */
283 err = mipsr6_emul(regs, nir);
284 if (err > 0) {
285 regs->cp0_epc = nepc;
286 err = mips_dsemul(regs, nir, epc, cepc);
287 if (err == SIGILL)
288 err = SIGEMT;
289 MIPS_R2_STATS(dsemul);
290 }
291 }
292
293 return err;
294 }
295
296 /**
297 * movz_func - Emulate a MOVZ instruction
298 * @regs: Process register set
299 * @ir: Instruction
300 *
301 * Returns 0 since it always succeeds.
302 */
303 static int movz_func(struct pt_regs *regs, u32 ir)
304 {
305 if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
306 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
307 MIPS_R2_STATS(movs);
308
309 return 0;
310 }
311
312 /**
 313  * movn_func - Emulate a MOVN instruction
314 * @regs: Process register set
315 * @ir: Instruction
316 *
317 * Returns 0 since it always succeeds.
318 */
319 static int movn_func(struct pt_regs *regs, u32 ir)
320 {
321 if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
322 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
323 MIPS_R2_STATS(movs);
324
325 return 0;
326 }
327
328 /**
329 * mfhi_func - Emulate a MFHI instruction
330 * @regs: Process register set
331 * @ir: Instruction
332 *
333 * Returns 0 since it always succeeds.
334 */
335 static int mfhi_func(struct pt_regs *regs, u32 ir)
336 {
337 if (MIPSInst_RD(ir))
338 regs->regs[MIPSInst_RD(ir)] = regs->hi;
339
340 MIPS_R2_STATS(hilo);
341
342 return 0;
343 }
344
345 /**
346 * mthi_func - Emulate a MTHI instruction
347 * @regs: Process register set
348 * @ir: Instruction
349 *
350 * Returns 0 since it always succeeds.
351 */
352 static int mthi_func(struct pt_regs *regs, u32 ir)
353 {
354 regs->hi = regs->regs[MIPSInst_RS(ir)];
355
356 MIPS_R2_STATS(hilo);
357
358 return 0;
359 }
360
361 /**
362 * mflo_func - Emulate a MFLO instruction
363 * @regs: Process register set
364 * @ir: Instruction
365 *
366 * Returns 0 since it always succeeds.
367 */
368 static int mflo_func(struct pt_regs *regs, u32 ir)
369 {
370 if (MIPSInst_RD(ir))
371 regs->regs[MIPSInst_RD(ir)] = regs->lo;
372
373 MIPS_R2_STATS(hilo);
374
375 return 0;
376 }
377
378 /**
379 * mtlo_func - Emulate a MTLO instruction
380 * @regs: Process register set
381 * @ir: Instruction
382 *
383 * Returns 0 since it always succeeds.
384 */
385 static int mtlo_func(struct pt_regs *regs, u32 ir)
386 {
387 regs->lo = regs->regs[MIPSInst_RS(ir)];
388
389 MIPS_R2_STATS(hilo);
390
391 return 0;
392 }
393
394 /**
395 * mult_func - Emulate a MULT instruction
396 * @regs: Process register set
397 * @ir: Instruction
398 *
399 * Returns 0 since it always succeeds.
400 */
401 static int mult_func(struct pt_regs *regs, u32 ir)
402 {
403 s64 res;
404 s32 rt, rs;
405
406 rt = regs->regs[MIPSInst_RT(ir)];
407 rs = regs->regs[MIPSInst_RS(ir)];
408 res = (s64)rt * (s64)rs;
409
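	/*
	 * Split the 64-bit product into LO/HI. Going through the 32-bit
	 * temporaries sign-extends each half, as MIPS64 keeps 32-bit
	 * values sign-extended in 64-bit registers.
	 */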
410 rs = res;
411 regs->lo = (s64)rs;
412 rt = res >> 32;
413 res = (s64)rt;
414 regs->hi = res;
415
416 MIPS_R2_STATS(muls);
417
418 return 0;
419 }
420
421 /**
422 * multu_func - Emulate a MULTU instruction
423 * @regs: Process register set
424 * @ir: Instruction
425 *
426 * Returns 0 since it always succeeds.
427 */
428 static int multu_func(struct pt_regs *regs, u32 ir)
429 {
430 u64 res;
431 u32 rt, rs;
432
433 rt = regs->regs[MIPSInst_RT(ir)];
434 rs = regs->regs[MIPSInst_RS(ir)];
435 res = (u64)rt * (u64)rs;
436 rt = res;
437 regs->lo = (s64)rt;
438 regs->hi = (s64)(res >> 32);
439
440 MIPS_R2_STATS(muls);
441
442 return 0;
443 }
444
445 /**
446 * div_func - Emulate a DIV instruction
447 * @regs: Process register set
448 * @ir: Instruction
449 *
450 * Returns 0 since it always succeeds.
451 */
452 static int div_func(struct pt_regs *regs, u32 ir)
453 {
454 s32 rt, rs;
455
456 rt = regs->regs[MIPSInst_RT(ir)];
457 rs = regs->regs[MIPSInst_RS(ir)];
458
459 regs->lo = (s64)(rs / rt);
460 regs->hi = (s64)(rs % rt);
461
462 MIPS_R2_STATS(divs);
463
464 return 0;
465 }
466
467 /**
468 * divu_func - Emulate a DIVU instruction
469 * @regs: Process register set
470 * @ir: Instruction
471 *
472 * Returns 0 since it always succeeds.
473 */
474 static int divu_func(struct pt_regs *regs, u32 ir)
475 {
476 u32 rt, rs;
477
478 rt = regs->regs[MIPSInst_RT(ir)];
479 rs = regs->regs[MIPSInst_RS(ir)];
480
481 regs->lo = (s64)(rs / rt);
482 regs->hi = (s64)(rs % rt);
483
484 MIPS_R2_STATS(divs);
485
486 return 0;
487 }
488
489 /**
490 * dmult_func - Emulate a DMULT instruction
491 * @regs: Process register set
492 * @ir: Instruction
493 *
494 * Returns 0 on success or SIGILL for 32-bit kernels.
495 */
496 static int dmult_func(struct pt_regs *regs, u32 ir)
497 {
498 s64 res;
499 s64 rt, rs;
500
501 if (IS_ENABLED(CONFIG_32BIT))
502 return SIGILL;
503
504 rt = regs->regs[MIPSInst_RT(ir)];
505 rs = regs->regs[MIPSInst_RS(ir)];
506 res = rt * rs;
507
508 regs->lo = res;
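	/*
	 * The high 64 bits of the 128-bit product come from the R6 DMUH
	 * instruction, which is always available here since this emulator
	 * only runs on MIPS R6 cores.
	 */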
509 __asm__ __volatile__(
510 "dmuh %0, %1, %2\t\n"
511 : "=r"(res)
512 : "r"(rt), "r"(rs));
513
514 regs->hi = res;
515
516 MIPS_R2_STATS(muls);
517
518 return 0;
519 }
520
521 /**
522 * dmultu_func - Emulate a DMULTU instruction
523 * @regs: Process register set
524 * @ir: Instruction
525 *
526 * Returns 0 on success or SIGILL for 32-bit kernels.
527 */
528 static int dmultu_func(struct pt_regs *regs, u32 ir)
529 {
530 u64 res;
531 u64 rt, rs;
532
533 if (IS_ENABLED(CONFIG_32BIT))
534 return SIGILL;
535
536 rt = regs->regs[MIPSInst_RT(ir)];
537 rs = regs->regs[MIPSInst_RS(ir)];
538 res = rt * rs;
539
540 regs->lo = res;
541 __asm__ __volatile__(
542 "dmuhu %0, %1, %2\t\n"
543 : "=r"(res)
544 : "r"(rt), "r"(rs));
545
546 regs->hi = res;
547
548 MIPS_R2_STATS(muls);
549
550 return 0;
551 }
552
553 /**
554 * ddiv_func - Emulate a DDIV instruction
555 * @regs: Process register set
556 * @ir: Instruction
557 *
558 * Returns 0 on success or SIGILL for 32-bit kernels.
559 */
560 static int ddiv_func(struct pt_regs *regs, u32 ir)
561 {
562 s64 rt, rs;
563
564 if (IS_ENABLED(CONFIG_32BIT))
565 return SIGILL;
566
567 rt = regs->regs[MIPSInst_RT(ir)];
568 rs = regs->regs[MIPSInst_RS(ir)];
569
570 regs->lo = rs / rt;
571 regs->hi = rs % rt;
572
573 MIPS_R2_STATS(divs);
574
575 return 0;
576 }
577
578 /**
579 * ddivu_func - Emulate a DDIVU instruction
580 * @regs: Process register set
581 * @ir: Instruction
582 *
583 * Returns 0 on success or SIGILL for 32-bit kernels.
584 */
585 static int ddivu_func(struct pt_regs *regs, u32 ir)
586 {
587 u64 rt, rs;
588
589 if (IS_ENABLED(CONFIG_32BIT))
590 return SIGILL;
591
592 rt = regs->regs[MIPSInst_RT(ir)];
593 rs = regs->regs[MIPSInst_RS(ir)];
594
595 regs->lo = rs / rt;
596 regs->hi = rs % rt;
597
598 MIPS_R2_STATS(divs);
599
600 return 0;
601 }
602
603 /* R6 removed instructions for the SPECIAL opcode */
604 static struct r2_decoder_table spec_op_table[] = {
605 { 0xfc1ff83f, 0x00000008, jr_func },
606 { 0xfc00ffff, 0x00000018, mult_func },
607 { 0xfc00ffff, 0x00000019, multu_func },
608 { 0xfc00ffff, 0x0000001c, dmult_func },
609 { 0xfc00ffff, 0x0000001d, dmultu_func },
610 { 0xffff07ff, 0x00000010, mfhi_func },
611 { 0xfc1fffff, 0x00000011, mthi_func },
612 { 0xffff07ff, 0x00000012, mflo_func },
613 { 0xfc1fffff, 0x00000013, mtlo_func },
614 { 0xfc0307ff, 0x00000001, movf_func },
615 { 0xfc0307ff, 0x00010001, movt_func },
616 { 0xfc0007ff, 0x0000000a, movz_func },
617 { 0xfc0007ff, 0x0000000b, movn_func },
618 { 0xfc00ffff, 0x0000001a, div_func },
619 { 0xfc00ffff, 0x0000001b, divu_func },
620 { 0xfc00ffff, 0x0000001e, ddiv_func },
621 { 0xfc00ffff, 0x0000001f, ddivu_func },
622 {}
623 };
624
625 /**
626 * madd_func - Emulate a MADD instruction
627 * @regs: Process register set
628 * @ir: Instruction
629 *
630 * Returns 0 since it always succeeds.
631 */
632 static int madd_func(struct pt_regs *regs, u32 ir)
633 {
634 s64 res;
635 s32 rt, rs;
636
637 rt = regs->regs[MIPSInst_RT(ir)];
638 rs = regs->regs[MIPSInst_RS(ir)];
639 res = (s64)rt * (s64)rs;
640 rt = regs->hi;
641 rs = regs->lo;
642 res += ((((s64)rt) << 32) | (u32)rs);
643
644 rt = res;
645 regs->lo = (s64)rt;
646 rs = res >> 32;
647 regs->hi = (s64)rs;
648
649 MIPS_R2_STATS(dsps);
650
651 return 0;
652 }
653
654 /**
655 * maddu_func - Emulate a MADDU instruction
656 * @regs: Process register set
657 * @ir: Instruction
658 *
659 * Returns 0 since it always succeeds.
660 */
661 static int maddu_func(struct pt_regs *regs, u32 ir)
662 {
663 u64 res;
664 u32 rt, rs;
665
666 rt = regs->regs[MIPSInst_RT(ir)];
667 rs = regs->regs[MIPSInst_RS(ir)];
668 res = (u64)rt * (u64)rs;
669 rt = regs->hi;
670 rs = regs->lo;
671 res += ((((s64)rt) << 32) | (u32)rs);
672
673 rt = res;
674 regs->lo = (s64)rt;
675 rs = res >> 32;
676 regs->hi = (s64)rs;
677
678 MIPS_R2_STATS(dsps);
679
680 return 0;
681 }
682
683 /**
684 * msub_func - Emulate a MSUB instruction
685 * @regs: Process register set
686 * @ir: Instruction
687 *
688 * Returns 0 since it always succeeds.
689 */
690 static int msub_func(struct pt_regs *regs, u32 ir)
691 {
692 s64 res;
693 s32 rt, rs;
694
695 rt = regs->regs[MIPSInst_RT(ir)];
696 rs = regs->regs[MIPSInst_RS(ir)];
697 res = (s64)rt * (s64)rs;
698 rt = regs->hi;
699 rs = regs->lo;
700 res = ((((s64)rt) << 32) | (u32)rs) - res;
701
702 rt = res;
703 regs->lo = (s64)rt;
704 rs = res >> 32;
705 regs->hi = (s64)rs;
706
707 MIPS_R2_STATS(dsps);
708
709 return 0;
710 }
711
712 /**
713 * msubu_func - Emulate a MSUBU instruction
714 * @regs: Process register set
715 * @ir: Instruction
716 *
717 * Returns 0 since it always succeeds.
718 */
719 static int msubu_func(struct pt_regs *regs, u32 ir)
720 {
721 u64 res;
722 u32 rt, rs;
723
724 rt = regs->regs[MIPSInst_RT(ir)];
725 rs = regs->regs[MIPSInst_RS(ir)];
726 res = (u64)rt * (u64)rs;
727 rt = regs->hi;
728 rs = regs->lo;
729 res = ((((s64)rt) << 32) | (u32)rs) - res;
730
731 rt = res;
732 regs->lo = (s64)rt;
733 rs = res >> 32;
734 regs->hi = (s64)rs;
735
736 MIPS_R2_STATS(dsps);
737
738 return 0;
739 }
740
741 /**
742 * mul_func - Emulate a MUL instruction
743 * @regs: Process register set
744 * @ir: Instruction
745 *
746 * Returns 0 since it always succeeds.
747 */
748 static int mul_func(struct pt_regs *regs, u32 ir)
749 {
750 s64 res;
751 s32 rt, rs;
752
753 if (!MIPSInst_RD(ir))
754 return 0;
755 rt = regs->regs[MIPSInst_RT(ir)];
756 rs = regs->regs[MIPSInst_RS(ir)];
757 res = (s64)rt * (s64)rs;
758
759 rs = res;
760 regs->regs[MIPSInst_RD(ir)] = (s64)rs;
761
762 MIPS_R2_STATS(muls);
763
764 return 0;
765 }
766
767 /**
768 * clz_func - Emulate a CLZ instruction
769 * @regs: Process register set
770 * @ir: Instruction
771 *
772 * Returns 0 since it always succeeds.
773 */
774 static int clz_func(struct pt_regs *regs, u32 ir)
775 {
776 u32 res;
777 u32 rs;
778
779 if (!MIPSInst_RD(ir))
780 return 0;
781
782 rs = regs->regs[MIPSInst_RS(ir)];
783 __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
784 regs->regs[MIPSInst_RD(ir)] = res;
785
786 MIPS_R2_STATS(bops);
787
788 return 0;
789 }
790
791 /**
792 * clo_func - Emulate a CLO instruction
793 * @regs: Process register set
794 * @ir: Instruction
795 *
796 * Returns 0 since it always succeeds.
797 */
798
799 static int clo_func(struct pt_regs *regs, u32 ir)
800 {
801 u32 res;
802 u32 rs;
803
804 if (!MIPSInst_RD(ir))
805 return 0;
806
807 rs = regs->regs[MIPSInst_RS(ir)];
808 __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
809 regs->regs[MIPSInst_RD(ir)] = res;
810
811 MIPS_R2_STATS(bops);
812
813 return 0;
814 }
815
816 /**
817 * dclz_func - Emulate a DCLZ instruction
818 * @regs: Process register set
819 * @ir: Instruction
820 *
821 * Returns 0 since it always succeeds.
822 */
823 static int dclz_func(struct pt_regs *regs, u32 ir)
824 {
825 u64 res;
826 u64 rs;
827
828 if (IS_ENABLED(CONFIG_32BIT))
829 return SIGILL;
830
831 if (!MIPSInst_RD(ir))
832 return 0;
833
834 rs = regs->regs[MIPSInst_RS(ir)];
835 __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
836 regs->regs[MIPSInst_RD(ir)] = res;
837
838 MIPS_R2_STATS(bops);
839
840 return 0;
841 }
842
843 /**
844 * dclo_func - Emulate a DCLO instruction
845 * @regs: Process register set
846 * @ir: Instruction
847 *
848 * Returns 0 since it always succeeds.
849 */
850 static int dclo_func(struct pt_regs *regs, u32 ir)
851 {
852 u64 res;
853 u64 rs;
854
855 if (IS_ENABLED(CONFIG_32BIT))
856 return SIGILL;
857
858 if (!MIPSInst_RD(ir))
859 return 0;
860
861 rs = regs->regs[MIPSInst_RS(ir)];
862 __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
863 regs->regs[MIPSInst_RD(ir)] = res;
864
865 MIPS_R2_STATS(bops);
866
867 return 0;
868 }
869
870 /* R6 removed instructions for the SPECIAL2 opcode */
871 static struct r2_decoder_table spec2_op_table[] = {
872 { 0xfc00ffff, 0x70000000, madd_func },
873 { 0xfc00ffff, 0x70000001, maddu_func },
874 { 0xfc0007ff, 0x70000002, mul_func },
875 { 0xfc00ffff, 0x70000004, msub_func },
876 { 0xfc00ffff, 0x70000005, msubu_func },
877 { 0xfc0007ff, 0x70000020, clz_func },
878 { 0xfc0007ff, 0x70000021, clo_func },
879 { 0xfc0007ff, 0x70000024, dclz_func },
880 { 0xfc0007ff, 0x70000025, dclo_func },
881 { }
882 };
883
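/*
 * Scan a decoder table for the first entry whose fixed encoding bits
 * (inst & mask) equal the entry's code and run its handler; SIGILL is
 * returned if no entry matches.
 */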
884 static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
885 struct r2_decoder_table *table)
886 {
887 struct r2_decoder_table *p;
888 int err;
889
890 for (p = table; p->func; p++) {
891 if ((inst & p->mask) == p->code) {
892 err = (p->func)(regs, inst);
893 return err;
894 }
895 }
896 return SIGILL;
897 }
898
899 /**
900 * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
901 * @regs: Process register set
902 * @inst: Instruction to decode and emulate
 903  * @fcr31: Location where the Floating Point Control and Status Register value is returned
904 */
905 int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
906 {
907 int err = 0;
908 unsigned long vaddr;
909 u32 nir;
910 unsigned long cpc, epc, nepc, r31, res, rs, rt;
911
912 void __user *fault_addr = NULL;
913 int pass = 0;
914
915 repeat:
916 r31 = regs->regs[31];
917 epc = regs->cp0_epc;
918 err = compute_return_epc(regs);
919 if (err < 0) {
920 BUG();
921 return SIGEMT;
922 }
 923 	pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d)\n",
924 inst, epc, pass);
925
926 switch (MIPSInst_OPCODE(inst)) {
927 case spec_op:
928 err = mipsr2_find_op_func(regs, inst, spec_op_table);
929 if (err < 0) {
930 /* FPU instruction under JR */
931 regs->cp0_cause |= CAUSEF_BD;
932 goto fpu_emul;
933 }
934 break;
935 case spec2_op:
936 err = mipsr2_find_op_func(regs, inst, spec2_op_table);
937 break;
938 case bcond_op:
939 rt = MIPSInst_RT(inst);
940 rs = MIPSInst_RS(inst);
941 switch (rt) {
942 case tgei_op:
943 if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
944 do_trap_or_bp(regs, 0, 0, "TGEI");
945
946 MIPS_R2_STATS(traps);
947
948 break;
949 case tgeiu_op:
950 if (regs->regs[rs] >= MIPSInst_UIMM(inst))
951 do_trap_or_bp(regs, 0, 0, "TGEIU");
952
953 MIPS_R2_STATS(traps);
954
955 break;
956 case tlti_op:
957 if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
958 do_trap_or_bp(regs, 0, 0, "TLTI");
959
960 MIPS_R2_STATS(traps);
961
962 break;
963 case tltiu_op:
964 if (regs->regs[rs] < MIPSInst_UIMM(inst))
965 do_trap_or_bp(regs, 0, 0, "TLTIU");
966
967 MIPS_R2_STATS(traps);
968
969 break;
970 case teqi_op:
971 if (regs->regs[rs] == MIPSInst_SIMM(inst))
972 do_trap_or_bp(regs, 0, 0, "TEQI");
973
974 MIPS_R2_STATS(traps);
975
976 break;
977 case tnei_op:
978 if (regs->regs[rs] != MIPSInst_SIMM(inst))
979 do_trap_or_bp(regs, 0, 0, "TNEI");
980
981 MIPS_R2_STATS(traps);
982
983 break;
984 case bltzl_op:
985 case bgezl_op:
986 case bltzall_op:
987 case bgezall_op:
988 if (delay_slot(regs)) {
989 err = SIGILL;
990 break;
991 }
992 regs->regs[31] = r31;
993 regs->cp0_epc = epc;
994 err = __compute_return_epc(regs);
995 if (err < 0)
996 return SIGEMT;
997 if (err != BRANCH_LIKELY_TAKEN)
998 break;
999 cpc = regs->cp0_epc;
1000 nepc = epc + 4;
1001 err = __get_user(nir, (u32 __user *)nepc);
1002 if (err) {
1003 err = SIGSEGV;
1004 break;
1005 }
1006 /*
1007 * This will probably be optimized away when
1008 * CONFIG_DEBUG_FS is not enabled
1009 */
1010 switch (rt) {
1011 case bltzl_op:
1012 MIPS_R2BR_STATS(bltzl);
1013 break;
1014 case bgezl_op:
1015 MIPS_R2BR_STATS(bgezl);
1016 break;
1017 case bltzall_op:
1018 MIPS_R2BR_STATS(bltzall);
1019 break;
1020 case bgezall_op:
1021 MIPS_R2BR_STATS(bgezall);
1022 break;
1023 }
1024
1025 switch (MIPSInst_OPCODE(nir)) {
1026 case cop1_op:
1027 case cop1x_op:
1028 case lwc1_op:
1029 case swc1_op:
1030 regs->cp0_cause |= CAUSEF_BD;
1031 goto fpu_emul;
1032 }
1033 if (nir) {
1034 err = mipsr6_emul(regs, nir);
1035 if (err > 0) {
1036 err = mips_dsemul(regs, nir, epc, cpc);
1037 if (err == SIGILL)
1038 err = SIGEMT;
1039 MIPS_R2_STATS(dsemul);
1040 }
1041 }
1042 break;
1043 case bltzal_op:
1044 case bgezal_op:
1045 if (delay_slot(regs)) {
1046 err = SIGILL;
1047 break;
1048 }
1049 regs->regs[31] = r31;
1050 regs->cp0_epc = epc;
1051 err = __compute_return_epc(regs);
1052 if (err < 0)
1053 return SIGEMT;
1054 cpc = regs->cp0_epc;
1055 nepc = epc + 4;
1056 err = __get_user(nir, (u32 __user *)nepc);
1057 if (err) {
1058 err = SIGSEGV;
1059 break;
1060 }
1061 /*
1062 * This will probably be optimized away when
1063 * CONFIG_DEBUG_FS is not enabled
1064 */
1065 switch (rt) {
1066 case bltzal_op:
1067 MIPS_R2BR_STATS(bltzal);
1068 break;
1069 case bgezal_op:
1070 MIPS_R2BR_STATS(bgezal);
1071 break;
1072 }
1073
1074 switch (MIPSInst_OPCODE(nir)) {
1075 case cop1_op:
1076 case cop1x_op:
1077 case lwc1_op:
1078 case swc1_op:
1079 regs->cp0_cause |= CAUSEF_BD;
1080 goto fpu_emul;
1081 }
1082 if (nir) {
1083 err = mipsr6_emul(regs, nir);
1084 if (err > 0) {
1085 err = mips_dsemul(regs, nir, epc, cpc);
1086 if (err == SIGILL)
1087 err = SIGEMT;
1088 MIPS_R2_STATS(dsemul);
1089 }
1090 }
1091 break;
1092 default:
1093 regs->regs[31] = r31;
1094 regs->cp0_epc = epc;
1095 err = SIGILL;
1096 break;
1097 }
1098 break;
1099
1100 case beql_op:
1101 case bnel_op:
1102 case blezl_op:
1103 case bgtzl_op:
1104 if (delay_slot(regs)) {
1105 err = SIGILL;
1106 break;
1107 }
1108 regs->regs[31] = r31;
1109 regs->cp0_epc = epc;
1110 err = __compute_return_epc(regs);
1111 if (err < 0)
1112 return SIGEMT;
1113 if (err != BRANCH_LIKELY_TAKEN)
1114 break;
1115 cpc = regs->cp0_epc;
1116 nepc = epc + 4;
1117 err = __get_user(nir, (u32 __user *)nepc);
1118 if (err) {
1119 err = SIGSEGV;
1120 break;
1121 }
1122 /*
1123 * This will probably be optimized away when
1124 * CONFIG_DEBUG_FS is not enabled
1125 */
1126 switch (MIPSInst_OPCODE(inst)) {
1127 case beql_op:
1128 MIPS_R2BR_STATS(beql);
1129 break;
1130 case bnel_op:
1131 MIPS_R2BR_STATS(bnel);
1132 break;
1133 case blezl_op:
1134 MIPS_R2BR_STATS(blezl);
1135 break;
1136 case bgtzl_op:
1137 MIPS_R2BR_STATS(bgtzl);
1138 break;
1139 }
1140
1141 switch (MIPSInst_OPCODE(nir)) {
1142 case cop1_op:
1143 case cop1x_op:
1144 case lwc1_op:
1145 case swc1_op:
1146 regs->cp0_cause |= CAUSEF_BD;
1147 goto fpu_emul;
1148 }
1149 if (nir) {
1150 err = mipsr6_emul(regs, nir);
1151 if (err > 0) {
1152 err = mips_dsemul(regs, nir, epc, cpc);
1153 if (err == SIGILL)
1154 err = SIGEMT;
1155 MIPS_R2_STATS(dsemul);
1156 }
1157 }
1158 break;
1159 case lwc1_op:
1160 case swc1_op:
1161 case cop1_op:
1162 case cop1x_op:
1163 fpu_emul:
1164 regs->regs[31] = r31;
1165 regs->cp0_epc = epc;
1166 if (!used_math()) { /* First time FPU user. */
1167 err = init_fpu();
1168 set_used_math();
1169 }
1170 lose_fpu(1); /* Save FPU state for the emulator. */
1171
1172 err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1173 &fault_addr);
1174 *fcr31 = current->thread.fpu.fcr31;
1175
1176 /*
1177 * We can't allow the emulated instruction to leave any of
1178 * the cause bits set in $fcr31.
1179 */
1180 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
1181
1182 /*
1183 		 * This is a tricky issue: lose_fpu() uses LL/SC atomics if the
1184 		 * FPU is owned, which effectively cancels a user-level LL/SC.
1185 		 * So it might seem logical not to restore FPU ownership here.
1186 		 * But sequences of multiple FPU instructions are far more common
1187 		 * than LL-FPU-SC, so we prefer to loop here until the next
1188 		 * scheduler cycle cancels FPU ownership.
1189 */
1190 own_fpu(1); /* Restore FPU state. */
1191
1192 if (err)
1193 current->thread.cp0_baduaddr = (unsigned long)fault_addr;
1194
1195 MIPS_R2_STATS(fpus);
1196
1197 break;
1198
1199 case lwl_op:
1200 rt = regs->regs[MIPSInst_RT(inst)];
1201 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1202 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1203 current->thread.cp0_baduaddr = vaddr;
1204 err = SIGSEGV;
1205 break;
1206 }
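		/*
		 * Emulate LWL one byte at a time; each user load below is
		 * covered by an __ex_table fixup that sets err to SIGSEGV
		 * if the access faults.
		 */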
1207 __asm__ __volatile__(
1208 " .set push\n"
1209 " .set reorder\n"
1210 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1211 "1:" LB "%1, 0(%2)\n"
1212 INS "%0, %1, 24, 8\n"
1213 " andi %1, %2, 0x3\n"
1214 " beq $0, %1, 9f\n"
1215 ADDIU "%2, %2, -1\n"
1216 "2:" LB "%1, 0(%2)\n"
1217 INS "%0, %1, 16, 8\n"
1218 " andi %1, %2, 0x3\n"
1219 " beq $0, %1, 9f\n"
1220 ADDIU "%2, %2, -1\n"
1221 "3:" LB "%1, 0(%2)\n"
1222 INS "%0, %1, 8, 8\n"
1223 " andi %1, %2, 0x3\n"
1224 " beq $0, %1, 9f\n"
1225 ADDIU "%2, %2, -1\n"
1226 "4:" LB "%1, 0(%2)\n"
1227 INS "%0, %1, 0, 8\n"
1228 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1229 "1:" LB "%1, 0(%2)\n"
1230 INS "%0, %1, 24, 8\n"
1231 ADDIU "%2, %2, 1\n"
1232 " andi %1, %2, 0x3\n"
1233 " beq $0, %1, 9f\n"
1234 "2:" LB "%1, 0(%2)\n"
1235 INS "%0, %1, 16, 8\n"
1236 ADDIU "%2, %2, 1\n"
1237 " andi %1, %2, 0x3\n"
1238 " beq $0, %1, 9f\n"
1239 "3:" LB "%1, 0(%2)\n"
1240 INS "%0, %1, 8, 8\n"
1241 ADDIU "%2, %2, 1\n"
1242 " andi %1, %2, 0x3\n"
1243 " beq $0, %1, 9f\n"
1244 "4:" LB "%1, 0(%2)\n"
1245 INS "%0, %1, 0, 8\n"
1246 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1247 "9: sll %0, %0, 0\n"
1248 "10:\n"
1249 " .insn\n"
1250 " .section .fixup,\"ax\"\n"
1251 "8: li %3,%4\n"
1252 " j 10b\n"
1253 " .previous\n"
1254 " .section __ex_table,\"a\"\n"
1255 STR(PTR) " 1b,8b\n"
1256 STR(PTR) " 2b,8b\n"
1257 STR(PTR) " 3b,8b\n"
1258 STR(PTR) " 4b,8b\n"
1259 " .previous\n"
1260 " .set pop\n"
1261 : "+&r"(rt), "=&r"(rs),
1262 "+&r"(vaddr), "+&r"(err)
1263 : "i"(SIGSEGV));
1264
1265 if (MIPSInst_RT(inst) && !err)
1266 regs->regs[MIPSInst_RT(inst)] = rt;
1267
1268 MIPS_R2_STATS(loads);
1269
1270 break;
1271
1272 case lwr_op:
1273 rt = regs->regs[MIPSInst_RT(inst)];
1274 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1275 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1276 current->thread.cp0_baduaddr = vaddr;
1277 err = SIGSEGV;
1278 break;
1279 }
1280 __asm__ __volatile__(
1281 " .set push\n"
1282 " .set reorder\n"
1283 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1284 "1:" LB "%1, 0(%2)\n"
1285 INS "%0, %1, 0, 8\n"
1286 ADDIU "%2, %2, 1\n"
1287 " andi %1, %2, 0x3\n"
1288 " beq $0, %1, 9f\n"
1289 "2:" LB "%1, 0(%2)\n"
1290 INS "%0, %1, 8, 8\n"
1291 ADDIU "%2, %2, 1\n"
1292 " andi %1, %2, 0x3\n"
1293 " beq $0, %1, 9f\n"
1294 "3:" LB "%1, 0(%2)\n"
1295 INS "%0, %1, 16, 8\n"
1296 ADDIU "%2, %2, 1\n"
1297 " andi %1, %2, 0x3\n"
1298 " beq $0, %1, 9f\n"
1299 "4:" LB "%1, 0(%2)\n"
1300 INS "%0, %1, 24, 8\n"
1301 " sll %0, %0, 0\n"
1302 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1303 "1:" LB "%1, 0(%2)\n"
1304 INS "%0, %1, 0, 8\n"
1305 " andi %1, %2, 0x3\n"
1306 " beq $0, %1, 9f\n"
1307 ADDIU "%2, %2, -1\n"
1308 "2:" LB "%1, 0(%2)\n"
1309 INS "%0, %1, 8, 8\n"
1310 " andi %1, %2, 0x3\n"
1311 " beq $0, %1, 9f\n"
1312 ADDIU "%2, %2, -1\n"
1313 "3:" LB "%1, 0(%2)\n"
1314 INS "%0, %1, 16, 8\n"
1315 " andi %1, %2, 0x3\n"
1316 " beq $0, %1, 9f\n"
1317 ADDIU "%2, %2, -1\n"
1318 "4:" LB "%1, 0(%2)\n"
1319 INS "%0, %1, 24, 8\n"
1320 " sll %0, %0, 0\n"
1321 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1322 "9:\n"
1323 "10:\n"
1324 " .insn\n"
1325 " .section .fixup,\"ax\"\n"
1326 "8: li %3,%4\n"
1327 " j 10b\n"
1328 " .previous\n"
1329 " .section __ex_table,\"a\"\n"
1330 STR(PTR) " 1b,8b\n"
1331 STR(PTR) " 2b,8b\n"
1332 STR(PTR) " 3b,8b\n"
1333 STR(PTR) " 4b,8b\n"
1334 " .previous\n"
1335 " .set pop\n"
1336 : "+&r"(rt), "=&r"(rs),
1337 "+&r"(vaddr), "+&r"(err)
1338 : "i"(SIGSEGV));
1339 if (MIPSInst_RT(inst) && !err)
1340 regs->regs[MIPSInst_RT(inst)] = rt;
1341
1342 MIPS_R2_STATS(loads);
1343
1344 break;
1345
1346 case swl_op:
1347 rt = regs->regs[MIPSInst_RT(inst)];
1348 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1349 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
1350 current->thread.cp0_baduaddr = vaddr;
1351 err = SIGSEGV;
1352 break;
1353 }
1354 __asm__ __volatile__(
1355 " .set push\n"
1356 " .set reorder\n"
1357 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1358 EXT "%1, %0, 24, 8\n"
1359 "1:" SB "%1, 0(%2)\n"
1360 " andi %1, %2, 0x3\n"
1361 " beq $0, %1, 9f\n"
1362 ADDIU "%2, %2, -1\n"
1363 EXT "%1, %0, 16, 8\n"
1364 "2:" SB "%1, 0(%2)\n"
1365 " andi %1, %2, 0x3\n"
1366 " beq $0, %1, 9f\n"
1367 ADDIU "%2, %2, -1\n"
1368 EXT "%1, %0, 8, 8\n"
1369 "3:" SB "%1, 0(%2)\n"
1370 " andi %1, %2, 0x3\n"
1371 " beq $0, %1, 9f\n"
1372 ADDIU "%2, %2, -1\n"
1373 EXT "%1, %0, 0, 8\n"
1374 "4:" SB "%1, 0(%2)\n"
1375 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1376 EXT "%1, %0, 24, 8\n"
1377 "1:" SB "%1, 0(%2)\n"
1378 ADDIU "%2, %2, 1\n"
1379 " andi %1, %2, 0x3\n"
1380 " beq $0, %1, 9f\n"
1381 EXT "%1, %0, 16, 8\n"
1382 "2:" SB "%1, 0(%2)\n"
1383 ADDIU "%2, %2, 1\n"
1384 " andi %1, %2, 0x3\n"
1385 " beq $0, %1, 9f\n"
1386 EXT "%1, %0, 8, 8\n"
1387 "3:" SB "%1, 0(%2)\n"
1388 ADDIU "%2, %2, 1\n"
1389 " andi %1, %2, 0x3\n"
1390 " beq $0, %1, 9f\n"
1391 EXT "%1, %0, 0, 8\n"
1392 "4:" SB "%1, 0(%2)\n"
1393 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1394 "9:\n"
1395 " .insn\n"
1396 " .section .fixup,\"ax\"\n"
1397 "8: li %3,%4\n"
1398 " j 9b\n"
1399 " .previous\n"
1400 " .section __ex_table,\"a\"\n"
1401 STR(PTR) " 1b,8b\n"
1402 STR(PTR) " 2b,8b\n"
1403 STR(PTR) " 3b,8b\n"
1404 STR(PTR) " 4b,8b\n"
1405 " .previous\n"
1406 " .set pop\n"
1407 : "+&r"(rt), "=&r"(rs),
1408 "+&r"(vaddr), "+&r"(err)
1409 : "i"(SIGSEGV)
1410 : "memory");
1411
1412 MIPS_R2_STATS(stores);
1413
1414 break;
1415
1416 case swr_op:
1417 rt = regs->regs[MIPSInst_RT(inst)];
1418 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1419 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
1420 current->thread.cp0_baduaddr = vaddr;
1421 err = SIGSEGV;
1422 break;
1423 }
1424 __asm__ __volatile__(
1425 " .set push\n"
1426 " .set reorder\n"
1427 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1428 EXT "%1, %0, 0, 8\n"
1429 "1:" SB "%1, 0(%2)\n"
1430 ADDIU "%2, %2, 1\n"
1431 " andi %1, %2, 0x3\n"
1432 " beq $0, %1, 9f\n"
1433 EXT "%1, %0, 8, 8\n"
1434 "2:" SB "%1, 0(%2)\n"
1435 ADDIU "%2, %2, 1\n"
1436 " andi %1, %2, 0x3\n"
1437 " beq $0, %1, 9f\n"
1438 EXT "%1, %0, 16, 8\n"
1439 "3:" SB "%1, 0(%2)\n"
1440 ADDIU "%2, %2, 1\n"
1441 " andi %1, %2, 0x3\n"
1442 " beq $0, %1, 9f\n"
1443 EXT "%1, %0, 24, 8\n"
1444 "4:" SB "%1, 0(%2)\n"
1445 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1446 EXT "%1, %0, 0, 8\n"
1447 "1:" SB "%1, 0(%2)\n"
1448 " andi %1, %2, 0x3\n"
1449 " beq $0, %1, 9f\n"
1450 ADDIU "%2, %2, -1\n"
1451 EXT "%1, %0, 8, 8\n"
1452 "2:" SB "%1, 0(%2)\n"
1453 " andi %1, %2, 0x3\n"
1454 " beq $0, %1, 9f\n"
1455 ADDIU "%2, %2, -1\n"
1456 EXT "%1, %0, 16, 8\n"
1457 "3:" SB "%1, 0(%2)\n"
1458 " andi %1, %2, 0x3\n"
1459 " beq $0, %1, 9f\n"
1460 ADDIU "%2, %2, -1\n"
1461 EXT "%1, %0, 24, 8\n"
1462 "4:" SB "%1, 0(%2)\n"
1463 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1464 "9:\n"
1465 " .insn\n"
1466 " .section .fixup,\"ax\"\n"
1467 "8: li %3,%4\n"
1468 " j 9b\n"
1469 " .previous\n"
1470 " .section __ex_table,\"a\"\n"
1471 STR(PTR) " 1b,8b\n"
1472 STR(PTR) " 2b,8b\n"
1473 STR(PTR) " 3b,8b\n"
1474 STR(PTR) " 4b,8b\n"
1475 " .previous\n"
1476 " .set pop\n"
1477 : "+&r"(rt), "=&r"(rs),
1478 "+&r"(vaddr), "+&r"(err)
1479 : "i"(SIGSEGV)
1480 : "memory");
1481
1482 MIPS_R2_STATS(stores);
1483
1484 break;
1485
1486 case ldl_op:
1487 if (IS_ENABLED(CONFIG_32BIT)) {
1488 err = SIGILL;
1489 break;
1490 }
1491
1492 rt = regs->regs[MIPSInst_RT(inst)];
1493 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1494 if (!access_ok(VERIFY_READ, vaddr, 8)) {
1495 current->thread.cp0_baduaddr = vaddr;
1496 err = SIGSEGV;
1497 break;
1498 }
1499 __asm__ __volatile__(
1500 " .set push\n"
1501 " .set reorder\n"
1502 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1503 "1: lb %1, 0(%2)\n"
1504 " dinsu %0, %1, 56, 8\n"
1505 " andi %1, %2, 0x7\n"
1506 " beq $0, %1, 9f\n"
1507 " daddiu %2, %2, -1\n"
1508 "2: lb %1, 0(%2)\n"
1509 " dinsu %0, %1, 48, 8\n"
1510 " andi %1, %2, 0x7\n"
1511 " beq $0, %1, 9f\n"
1512 " daddiu %2, %2, -1\n"
1513 "3: lb %1, 0(%2)\n"
1514 " dinsu %0, %1, 40, 8\n"
1515 " andi %1, %2, 0x7\n"
1516 " beq $0, %1, 9f\n"
1517 " daddiu %2, %2, -1\n"
1518 "4: lb %1, 0(%2)\n"
1519 " dinsu %0, %1, 32, 8\n"
1520 " andi %1, %2, 0x7\n"
1521 " beq $0, %1, 9f\n"
1522 " daddiu %2, %2, -1\n"
1523 "5: lb %1, 0(%2)\n"
1524 " dins %0, %1, 24, 8\n"
1525 " andi %1, %2, 0x7\n"
1526 " beq $0, %1, 9f\n"
1527 " daddiu %2, %2, -1\n"
1528 "6: lb %1, 0(%2)\n"
1529 " dins %0, %1, 16, 8\n"
1530 " andi %1, %2, 0x7\n"
1531 " beq $0, %1, 9f\n"
1532 " daddiu %2, %2, -1\n"
1533 "7: lb %1, 0(%2)\n"
1534 " dins %0, %1, 8, 8\n"
1535 " andi %1, %2, 0x7\n"
1536 " beq $0, %1, 9f\n"
1537 " daddiu %2, %2, -1\n"
1538 "0: lb %1, 0(%2)\n"
1539 " dins %0, %1, 0, 8\n"
1540 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1541 "1: lb %1, 0(%2)\n"
1542 " dinsu %0, %1, 56, 8\n"
1543 " daddiu %2, %2, 1\n"
1544 " andi %1, %2, 0x7\n"
1545 " beq $0, %1, 9f\n"
1546 "2: lb %1, 0(%2)\n"
1547 " dinsu %0, %1, 48, 8\n"
1548 " daddiu %2, %2, 1\n"
1549 " andi %1, %2, 0x7\n"
1550 " beq $0, %1, 9f\n"
1551 "3: lb %1, 0(%2)\n"
1552 " dinsu %0, %1, 40, 8\n"
1553 " daddiu %2, %2, 1\n"
1554 " andi %1, %2, 0x7\n"
1555 " beq $0, %1, 9f\n"
1556 "4: lb %1, 0(%2)\n"
1557 " dinsu %0, %1, 32, 8\n"
1558 " daddiu %2, %2, 1\n"
1559 " andi %1, %2, 0x7\n"
1560 " beq $0, %1, 9f\n"
1561 "5: lb %1, 0(%2)\n"
1562 " dins %0, %1, 24, 8\n"
1563 " daddiu %2, %2, 1\n"
1564 " andi %1, %2, 0x7\n"
1565 " beq $0, %1, 9f\n"
1566 "6: lb %1, 0(%2)\n"
1567 " dins %0, %1, 16, 8\n"
1568 " daddiu %2, %2, 1\n"
1569 " andi %1, %2, 0x7\n"
1570 " beq $0, %1, 9f\n"
1571 "7: lb %1, 0(%2)\n"
1572 " dins %0, %1, 8, 8\n"
1573 " daddiu %2, %2, 1\n"
1574 " andi %1, %2, 0x7\n"
1575 " beq $0, %1, 9f\n"
1576 "0: lb %1, 0(%2)\n"
1577 " dins %0, %1, 0, 8\n"
1578 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1579 "9:\n"
1580 " .insn\n"
1581 " .section .fixup,\"ax\"\n"
1582 "8: li %3,%4\n"
1583 " j 9b\n"
1584 " .previous\n"
1585 " .section __ex_table,\"a\"\n"
1586 STR(PTR) " 1b,8b\n"
1587 STR(PTR) " 2b,8b\n"
1588 STR(PTR) " 3b,8b\n"
1589 STR(PTR) " 4b,8b\n"
1590 STR(PTR) " 5b,8b\n"
1591 STR(PTR) " 6b,8b\n"
1592 STR(PTR) " 7b,8b\n"
1593 STR(PTR) " 0b,8b\n"
1594 " .previous\n"
1595 " .set pop\n"
1596 : "+&r"(rt), "=&r"(rs),
1597 "+&r"(vaddr), "+&r"(err)
1598 : "i"(SIGSEGV));
1599 if (MIPSInst_RT(inst) && !err)
1600 regs->regs[MIPSInst_RT(inst)] = rt;
1601
1602 MIPS_R2_STATS(loads);
1603 break;
1604
1605 case ldr_op:
1606 if (IS_ENABLED(CONFIG_32BIT)) {
1607 err = SIGILL;
1608 break;
1609 }
1610
1611 rt = regs->regs[MIPSInst_RT(inst)];
1612 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1613 if (!access_ok(VERIFY_READ, vaddr, 8)) {
1614 current->thread.cp0_baduaddr = vaddr;
1615 err = SIGSEGV;
1616 break;
1617 }
1618 __asm__ __volatile__(
1619 " .set push\n"
1620 " .set reorder\n"
1621 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1622 "1: lb %1, 0(%2)\n"
1623 " dins %0, %1, 0, 8\n"
1624 " daddiu %2, %2, 1\n"
1625 " andi %1, %2, 0x7\n"
1626 " beq $0, %1, 9f\n"
1627 "2: lb %1, 0(%2)\n"
1628 " dins %0, %1, 8, 8\n"
1629 " daddiu %2, %2, 1\n"
1630 " andi %1, %2, 0x7\n"
1631 " beq $0, %1, 9f\n"
1632 "3: lb %1, 0(%2)\n"
1633 " dins %0, %1, 16, 8\n"
1634 " daddiu %2, %2, 1\n"
1635 " andi %1, %2, 0x7\n"
1636 " beq $0, %1, 9f\n"
1637 "4: lb %1, 0(%2)\n"
1638 " dins %0, %1, 24, 8\n"
1639 " daddiu %2, %2, 1\n"
1640 " andi %1, %2, 0x7\n"
1641 " beq $0, %1, 9f\n"
1642 "5: lb %1, 0(%2)\n"
1643 " dinsu %0, %1, 32, 8\n"
1644 " daddiu %2, %2, 1\n"
1645 " andi %1, %2, 0x7\n"
1646 " beq $0, %1, 9f\n"
1647 "6: lb %1, 0(%2)\n"
1648 " dinsu %0, %1, 40, 8\n"
1649 " daddiu %2, %2, 1\n"
1650 " andi %1, %2, 0x7\n"
1651 " beq $0, %1, 9f\n"
1652 "7: lb %1, 0(%2)\n"
1653 " dinsu %0, %1, 48, 8\n"
1654 " daddiu %2, %2, 1\n"
1655 " andi %1, %2, 0x7\n"
1656 " beq $0, %1, 9f\n"
1657 "0: lb %1, 0(%2)\n"
1658 " dinsu %0, %1, 56, 8\n"
1659 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1660 "1: lb %1, 0(%2)\n"
1661 " dins %0, %1, 0, 8\n"
1662 " andi %1, %2, 0x7\n"
1663 " beq $0, %1, 9f\n"
1664 " daddiu %2, %2, -1\n"
1665 "2: lb %1, 0(%2)\n"
1666 " dins %0, %1, 8, 8\n"
1667 " andi %1, %2, 0x7\n"
1668 " beq $0, %1, 9f\n"
1669 " daddiu %2, %2, -1\n"
1670 "3: lb %1, 0(%2)\n"
1671 " dins %0, %1, 16, 8\n"
1672 " andi %1, %2, 0x7\n"
1673 " beq $0, %1, 9f\n"
1674 " daddiu %2, %2, -1\n"
1675 "4: lb %1, 0(%2)\n"
1676 " dins %0, %1, 24, 8\n"
1677 " andi %1, %2, 0x7\n"
1678 " beq $0, %1, 9f\n"
1679 " daddiu %2, %2, -1\n"
1680 "5: lb %1, 0(%2)\n"
1681 " dinsu %0, %1, 32, 8\n"
1682 " andi %1, %2, 0x7\n"
1683 " beq $0, %1, 9f\n"
1684 " daddiu %2, %2, -1\n"
1685 "6: lb %1, 0(%2)\n"
1686 " dinsu %0, %1, 40, 8\n"
1687 " andi %1, %2, 0x7\n"
1688 " beq $0, %1, 9f\n"
1689 " daddiu %2, %2, -1\n"
1690 "7: lb %1, 0(%2)\n"
1691 " dinsu %0, %1, 48, 8\n"
1692 " andi %1, %2, 0x7\n"
1693 " beq $0, %1, 9f\n"
1694 " daddiu %2, %2, -1\n"
1695 "0: lb %1, 0(%2)\n"
1696 " dinsu %0, %1, 56, 8\n"
1697 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1698 "9:\n"
1699 " .insn\n"
1700 " .section .fixup,\"ax\"\n"
1701 "8: li %3,%4\n"
1702 " j 9b\n"
1703 " .previous\n"
1704 " .section __ex_table,\"a\"\n"
1705 STR(PTR) " 1b,8b\n"
1706 STR(PTR) " 2b,8b\n"
1707 STR(PTR) " 3b,8b\n"
1708 STR(PTR) " 4b,8b\n"
1709 STR(PTR) " 5b,8b\n"
1710 STR(PTR) " 6b,8b\n"
1711 STR(PTR) " 7b,8b\n"
1712 STR(PTR) " 0b,8b\n"
1713 " .previous\n"
1714 " .set pop\n"
1715 : "+&r"(rt), "=&r"(rs),
1716 "+&r"(vaddr), "+&r"(err)
1717 : "i"(SIGSEGV));
1718 if (MIPSInst_RT(inst) && !err)
1719 regs->regs[MIPSInst_RT(inst)] = rt;
1720
1721 MIPS_R2_STATS(loads);
1722 break;
1723
1724 case sdl_op:
1725 if (IS_ENABLED(CONFIG_32BIT)) {
1726 err = SIGILL;
1727 break;
1728 }
1729
1730 rt = regs->regs[MIPSInst_RT(inst)];
1731 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1732 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
1733 current->thread.cp0_baduaddr = vaddr;
1734 err = SIGSEGV;
1735 break;
1736 }
1737 __asm__ __volatile__(
1738 " .set push\n"
1739 " .set reorder\n"
1740 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1741 " dextu %1, %0, 56, 8\n"
1742 "1: sb %1, 0(%2)\n"
1743 " andi %1, %2, 0x7\n"
1744 " beq $0, %1, 9f\n"
1745 " daddiu %2, %2, -1\n"
1746 " dextu %1, %0, 48, 8\n"
1747 "2: sb %1, 0(%2)\n"
1748 " andi %1, %2, 0x7\n"
1749 " beq $0, %1, 9f\n"
1750 " daddiu %2, %2, -1\n"
1751 " dextu %1, %0, 40, 8\n"
1752 "3: sb %1, 0(%2)\n"
1753 " andi %1, %2, 0x7\n"
1754 " beq $0, %1, 9f\n"
1755 " daddiu %2, %2, -1\n"
1756 " dextu %1, %0, 32, 8\n"
1757 "4: sb %1, 0(%2)\n"
1758 " andi %1, %2, 0x7\n"
1759 " beq $0, %1, 9f\n"
1760 " daddiu %2, %2, -1\n"
1761 " dext %1, %0, 24, 8\n"
1762 "5: sb %1, 0(%2)\n"
1763 " andi %1, %2, 0x7\n"
1764 " beq $0, %1, 9f\n"
1765 " daddiu %2, %2, -1\n"
1766 " dext %1, %0, 16, 8\n"
1767 "6: sb %1, 0(%2)\n"
1768 " andi %1, %2, 0x7\n"
1769 " beq $0, %1, 9f\n"
1770 " daddiu %2, %2, -1\n"
1771 " dext %1, %0, 8, 8\n"
1772 "7: sb %1, 0(%2)\n"
1773 " andi %1, %2, 0x7\n"
1774 " beq $0, %1, 9f\n"
1775 " daddiu %2, %2, -1\n"
1776 " dext %1, %0, 0, 8\n"
1777 "0: sb %1, 0(%2)\n"
1778 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1779 " dextu %1, %0, 56, 8\n"
1780 "1: sb %1, 0(%2)\n"
1781 " daddiu %2, %2, 1\n"
1782 " andi %1, %2, 0x7\n"
1783 " beq $0, %1, 9f\n"
1784 " dextu %1, %0, 48, 8\n"
1785 "2: sb %1, 0(%2)\n"
1786 " daddiu %2, %2, 1\n"
1787 " andi %1, %2, 0x7\n"
1788 " beq $0, %1, 9f\n"
1789 " dextu %1, %0, 40, 8\n"
1790 "3: sb %1, 0(%2)\n"
1791 " daddiu %2, %2, 1\n"
1792 " andi %1, %2, 0x7\n"
1793 " beq $0, %1, 9f\n"
1794 " dextu %1, %0, 32, 8\n"
1795 "4: sb %1, 0(%2)\n"
1796 " daddiu %2, %2, 1\n"
1797 " andi %1, %2, 0x7\n"
1798 " beq $0, %1, 9f\n"
1799 " dext %1, %0, 24, 8\n"
1800 "5: sb %1, 0(%2)\n"
1801 " daddiu %2, %2, 1\n"
1802 " andi %1, %2, 0x7\n"
1803 " beq $0, %1, 9f\n"
1804 " dext %1, %0, 16, 8\n"
1805 "6: sb %1, 0(%2)\n"
1806 " daddiu %2, %2, 1\n"
1807 " andi %1, %2, 0x7\n"
1808 " beq $0, %1, 9f\n"
1809 " dext %1, %0, 8, 8\n"
1810 "7: sb %1, 0(%2)\n"
1811 " daddiu %2, %2, 1\n"
1812 " andi %1, %2, 0x7\n"
1813 " beq $0, %1, 9f\n"
1814 " dext %1, %0, 0, 8\n"
1815 "0: sb %1, 0(%2)\n"
1816 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1817 "9:\n"
1818 " .insn\n"
1819 " .section .fixup,\"ax\"\n"
1820 "8: li %3,%4\n"
1821 " j 9b\n"
1822 " .previous\n"
1823 " .section __ex_table,\"a\"\n"
1824 STR(PTR) " 1b,8b\n"
1825 STR(PTR) " 2b,8b\n"
1826 STR(PTR) " 3b,8b\n"
1827 STR(PTR) " 4b,8b\n"
1828 STR(PTR) " 5b,8b\n"
1829 STR(PTR) " 6b,8b\n"
1830 STR(PTR) " 7b,8b\n"
1831 STR(PTR) " 0b,8b\n"
1832 " .previous\n"
1833 " .set pop\n"
1834 : "+&r"(rt), "=&r"(rs),
1835 "+&r"(vaddr), "+&r"(err)
1836 : "i"(SIGSEGV)
1837 : "memory");
1838
1839 MIPS_R2_STATS(stores);
1840 break;
1841
1842 case sdr_op:
1843 if (IS_ENABLED(CONFIG_32BIT)) {
1844 err = SIGILL;
1845 break;
1846 }
1847
1848 rt = regs->regs[MIPSInst_RT(inst)];
1849 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1850 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
1851 current->thread.cp0_baduaddr = vaddr;
1852 err = SIGSEGV;
1853 break;
1854 }
1855 __asm__ __volatile__(
1856 " .set push\n"
1857 " .set reorder\n"
1858 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1859 " dext %1, %0, 0, 8\n"
1860 "1: sb %1, 0(%2)\n"
1861 " daddiu %2, %2, 1\n"
1862 " andi %1, %2, 0x7\n"
1863 " beq $0, %1, 9f\n"
1864 " dext %1, %0, 8, 8\n"
1865 "2: sb %1, 0(%2)\n"
1866 " daddiu %2, %2, 1\n"
1867 " andi %1, %2, 0x7\n"
1868 " beq $0, %1, 9f\n"
1869 " dext %1, %0, 16, 8\n"
1870 "3: sb %1, 0(%2)\n"
1871 " daddiu %2, %2, 1\n"
1872 " andi %1, %2, 0x7\n"
1873 " beq $0, %1, 9f\n"
1874 " dext %1, %0, 24, 8\n"
1875 "4: sb %1, 0(%2)\n"
1876 " daddiu %2, %2, 1\n"
1877 " andi %1, %2, 0x7\n"
1878 " beq $0, %1, 9f\n"
1879 " dextu %1, %0, 32, 8\n"
1880 "5: sb %1, 0(%2)\n"
1881 " daddiu %2, %2, 1\n"
1882 " andi %1, %2, 0x7\n"
1883 " beq $0, %1, 9f\n"
1884 " dextu %1, %0, 40, 8\n"
1885 "6: sb %1, 0(%2)\n"
1886 " daddiu %2, %2, 1\n"
1887 " andi %1, %2, 0x7\n"
1888 " beq $0, %1, 9f\n"
1889 " dextu %1, %0, 48, 8\n"
1890 "7: sb %1, 0(%2)\n"
1891 " daddiu %2, %2, 1\n"
1892 " andi %1, %2, 0x7\n"
1893 " beq $0, %1, 9f\n"
1894 " dextu %1, %0, 56, 8\n"
1895 "0: sb %1, 0(%2)\n"
1896 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1897 " dext %1, %0, 0, 8\n"
1898 "1: sb %1, 0(%2)\n"
1899 " andi %1, %2, 0x7\n"
1900 " beq $0, %1, 9f\n"
1901 " daddiu %2, %2, -1\n"
1902 " dext %1, %0, 8, 8\n"
1903 "2: sb %1, 0(%2)\n"
1904 " andi %1, %2, 0x7\n"
1905 " beq $0, %1, 9f\n"
1906 " daddiu %2, %2, -1\n"
1907 " dext %1, %0, 16, 8\n"
1908 "3: sb %1, 0(%2)\n"
1909 " andi %1, %2, 0x7\n"
1910 " beq $0, %1, 9f\n"
1911 " daddiu %2, %2, -1\n"
1912 " dext %1, %0, 24, 8\n"
1913 "4: sb %1, 0(%2)\n"
1914 " andi %1, %2, 0x7\n"
1915 " beq $0, %1, 9f\n"
1916 " daddiu %2, %2, -1\n"
1917 " dextu %1, %0, 32, 8\n"
1918 "5: sb %1, 0(%2)\n"
1919 " andi %1, %2, 0x7\n"
1920 " beq $0, %1, 9f\n"
1921 " daddiu %2, %2, -1\n"
1922 " dextu %1, %0, 40, 8\n"
1923 "6: sb %1, 0(%2)\n"
1924 " andi %1, %2, 0x7\n"
1925 " beq $0, %1, 9f\n"
1926 " daddiu %2, %2, -1\n"
1927 " dextu %1, %0, 48, 8\n"
1928 "7: sb %1, 0(%2)\n"
1929 " andi %1, %2, 0x7\n"
1930 " beq $0, %1, 9f\n"
1931 " daddiu %2, %2, -1\n"
1932 " dextu %1, %0, 56, 8\n"
1933 "0: sb %1, 0(%2)\n"
1934 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1935 "9:\n"
1936 " .insn\n"
1937 " .section .fixup,\"ax\"\n"
1938 "8: li %3,%4\n"
1939 " j 9b\n"
1940 " .previous\n"
1941 " .section __ex_table,\"a\"\n"
1942 STR(PTR) " 1b,8b\n"
1943 STR(PTR) " 2b,8b\n"
1944 STR(PTR) " 3b,8b\n"
1945 STR(PTR) " 4b,8b\n"
1946 STR(PTR) " 5b,8b\n"
1947 STR(PTR) " 6b,8b\n"
1948 STR(PTR) " 7b,8b\n"
1949 STR(PTR) " 0b,8b\n"
1950 " .previous\n"
1951 " .set pop\n"
1952 : "+&r"(rt), "=&r"(rs),
1953 "+&r"(vaddr), "+&r"(err)
1954 : "i"(SIGSEGV)
1955 : "memory");
1956
1957 MIPS_R2_STATS(stores);
1958
1959 break;
1960 case ll_op:
1961 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1962 if (vaddr & 0x3) {
1963 current->thread.cp0_baduaddr = vaddr;
1964 err = SIGBUS;
1965 break;
1966 }
1967 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1968 current->thread.cp0_baduaddr = vaddr;
1969 err = SIGBUS;
1970 break;
1971 }
1972
1973 if (!cpu_has_rw_llb) {
1974 /*
1975 * An LL/SC block can't be safely emulated without
1976 			 * Config5/LLB being available. So it's probably time to
1977 * kill our process before things get any worse. This is
1978 * because Config5/LLB allows us to use ERETNC so that
1979 * the LLAddr/LLB bit is not cleared when we return from
1980 * an exception. MIPS R2 LL/SC instructions trap with an
1981 * RI exception so once we emulate them here, we return
1982 * back to userland with ERETNC. That preserves the
1983 * LLAddr/LLB so the subsequent SC instruction will
1984 * succeed preserving the atomic semantics of the LL/SC
1985 * block. Without that, there is no safe way to emulate
1986 * an LL/SC block in MIPSR2 userland.
1987 */
1988 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
1989 err = SIGKILL;
1990 break;
1991 }
1992
1993 __asm__ __volatile__(
1994 "1:\n"
1995 "ll %0, 0(%2)\n"
1996 "2:\n"
1997 ".insn\n"
1998 ".section .fixup,\"ax\"\n"
1999 "3:\n"
2000 "li %1, %3\n"
2001 "j 2b\n"
2002 ".previous\n"
2003 ".section __ex_table,\"a\"\n"
2004 STR(PTR) " 1b,3b\n"
2005 ".previous\n"
2006 : "=&r"(res), "+&r"(err)
2007 : "r"(vaddr), "i"(SIGSEGV)
2008 : "memory");
2009
2010 if (MIPSInst_RT(inst) && !err)
2011 regs->regs[MIPSInst_RT(inst)] = res;
2012 MIPS_R2_STATS(llsc);
2013
2014 break;
2015
2016 case sc_op:
2017 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2018 if (vaddr & 0x3) {
2019 current->thread.cp0_baduaddr = vaddr;
2020 err = SIGBUS;
2021 break;
2022 }
2023 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
2024 current->thread.cp0_baduaddr = vaddr;
2025 err = SIGBUS;
2026 break;
2027 }
2028
2029 if (!cpu_has_rw_llb) {
2030 /*
2031 * An LL/SC block can't be safely emulated without
2032 			 * Config5/LLB being available. So it's probably time to
2033 * kill our process before things get any worse. This is
2034 * because Config5/LLB allows us to use ERETNC so that
2035 * the LLAddr/LLB bit is not cleared when we return from
2036 * an exception. MIPS R2 LL/SC instructions trap with an
2037 * RI exception so once we emulate them here, we return
2038 * back to userland with ERETNC. That preserves the
2039 * LLAddr/LLB so the subsequent SC instruction will
2040 * succeed preserving the atomic semantics of the LL/SC
2041 * block. Without that, there is no safe way to emulate
2042 * an LL/SC block in MIPSR2 userland.
2043 */
2044 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2045 err = SIGKILL;
2046 break;
2047 }
2048
2049 res = regs->regs[MIPSInst_RT(inst)];
2050
2051 __asm__ __volatile__(
2052 "1:\n"
2053 "sc %0, 0(%2)\n"
2054 "2:\n"
2055 ".insn\n"
2056 ".section .fixup,\"ax\"\n"
2057 "3:\n"
2058 "li %1, %3\n"
2059 "j 2b\n"
2060 ".previous\n"
2061 ".section __ex_table,\"a\"\n"
2062 STR(PTR) " 1b,3b\n"
2063 ".previous\n"
2064 : "+&r"(res), "+&r"(err)
2065 : "r"(vaddr), "i"(SIGSEGV));
2066
2067 if (MIPSInst_RT(inst) && !err)
2068 regs->regs[MIPSInst_RT(inst)] = res;
2069
2070 MIPS_R2_STATS(llsc);
2071
2072 break;
2073
2074 case lld_op:
2075 if (IS_ENABLED(CONFIG_32BIT)) {
2076 err = SIGILL;
2077 break;
2078 }
2079
2080 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2081 if (vaddr & 0x7) {
2082 current->thread.cp0_baduaddr = vaddr;
2083 err = SIGBUS;
2084 break;
2085 }
2086 if (!access_ok(VERIFY_READ, vaddr, 8)) {
2087 current->thread.cp0_baduaddr = vaddr;
2088 err = SIGBUS;
2089 break;
2090 }
2091
2092 if (!cpu_has_rw_llb) {
2093 /*
2094 * An LL/SC block can't be safely emulated without
2095 			 * Config5/LLB being available. So it's probably time to
2096 * kill our process before things get any worse. This is
2097 * because Config5/LLB allows us to use ERETNC so that
2098 * the LLAddr/LLB bit is not cleared when we return from
2099 * an exception. MIPS R2 LL/SC instructions trap with an
2100 * RI exception so once we emulate them here, we return
2101 * back to userland with ERETNC. That preserves the
2102 * LLAddr/LLB so the subsequent SC instruction will
2103 * succeed preserving the atomic semantics of the LL/SC
2104 * block. Without that, there is no safe way to emulate
2105 * an LL/SC block in MIPSR2 userland.
2106 */
2107 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2108 err = SIGKILL;
2109 break;
2110 }
2111
2112 __asm__ __volatile__(
2113 "1:\n"
2114 "lld %0, 0(%2)\n"
2115 "2:\n"
2116 ".insn\n"
2117 ".section .fixup,\"ax\"\n"
2118 "3:\n"
2119 "li %1, %3\n"
2120 "j 2b\n"
2121 ".previous\n"
2122 ".section __ex_table,\"a\"\n"
2123 STR(PTR) " 1b,3b\n"
2124 ".previous\n"
2125 : "=&r"(res), "+&r"(err)
2126 : "r"(vaddr), "i"(SIGSEGV)
2127 : "memory");
2128 if (MIPSInst_RT(inst) && !err)
2129 regs->regs[MIPSInst_RT(inst)] = res;
2130
2131 MIPS_R2_STATS(llsc);
2132
2133 break;
2134
2135 case scd_op:
2136 if (IS_ENABLED(CONFIG_32BIT)) {
2137 err = SIGILL;
2138 break;
2139 }
2140
2141 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2142 if (vaddr & 0x7) {
2143 current->thread.cp0_baduaddr = vaddr;
2144 err = SIGBUS;
2145 break;
2146 }
2147 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
2148 current->thread.cp0_baduaddr = vaddr;
2149 err = SIGBUS;
2150 break;
2151 }
2152
2153 if (!cpu_has_rw_llb) {
2154 /*
2155 * An LL/SC block can't be safely emulated without
2156 			 * Config5/LLB being available. So it's probably time to
2157 * kill our process before things get any worse. This is
2158 * because Config5/LLB allows us to use ERETNC so that
2159 * the LLAddr/LLB bit is not cleared when we return from
2160 * an exception. MIPS R2 LL/SC instructions trap with an
2161 * RI exception so once we emulate them here, we return
2162 * back to userland with ERETNC. That preserves the
2163 * LLAddr/LLB so the subsequent SC instruction will
2164 * succeed preserving the atomic semantics of the LL/SC
2165 * block. Without that, there is no safe way to emulate
2166 * an LL/SC block in MIPSR2 userland.
2167 */
2168 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2169 err = SIGKILL;
2170 break;
2171 }
2172
2173 res = regs->regs[MIPSInst_RT(inst)];
2174
2175 __asm__ __volatile__(
2176 "1:\n"
2177 "scd %0, 0(%2)\n"
2178 "2:\n"
2179 ".insn\n"
2180 ".section .fixup,\"ax\"\n"
2181 "3:\n"
2182 "li %1, %3\n"
2183 "j 2b\n"
2184 ".previous\n"
2185 ".section __ex_table,\"a\"\n"
2186 STR(PTR) " 1b,3b\n"
2187 ".previous\n"
2188 : "+&r"(res), "+&r"(err)
2189 : "r"(vaddr), "i"(SIGSEGV));
2190
2191 if (MIPSInst_RT(inst) && !err)
2192 regs->regs[MIPSInst_RT(inst)] = res;
2193
2194 MIPS_R2_STATS(llsc);
2195
2196 break;
2197 case pref_op:
2198 /* skip it */
2199 break;
2200 default:
2201 err = SIGILL;
2202 }
2203
2204 /*
2205 * Let's not return to userland just yet. It's costly and
2206 * it's likely we have more R2 instructions to emulate
2207 */
2208 if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
2209 regs->cp0_cause &= ~CAUSEF_BD;
2210 err = get_user(inst, (u32 __user *)regs->cp0_epc);
2211 if (!err)
2212 goto repeat;
2213
2214 if (err < 0)
2215 err = SIGSEGV;
2216 }
2217
2218 if (err && (err != SIGEMT)) {
2219 regs->regs[31] = r31;
2220 regs->cp0_epc = epc;
2221 }
2222
2223 /* Likely a MIPS R6 compatible instruction */
2224 if (pass && (err == SIGILL))
2225 err = 0;
2226
2227 return err;
2228 }
2229
2230 #ifdef CONFIG_DEBUG_FS
2231
2232 static int mipsr2_stats_show(struct seq_file *s, void *unused)
2233 {
2234
2235 seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
2236 seq_printf(s, "movs\t\t%ld\t%ld\n",
2237 (unsigned long)__this_cpu_read(mipsr2emustats.movs),
2238 (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
2239 seq_printf(s, "hilo\t\t%ld\t%ld\n",
2240 (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
2241 (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
2242 seq_printf(s, "muls\t\t%ld\t%ld\n",
2243 (unsigned long)__this_cpu_read(mipsr2emustats.muls),
2244 (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
2245 seq_printf(s, "divs\t\t%ld\t%ld\n",
2246 (unsigned long)__this_cpu_read(mipsr2emustats.divs),
2247 (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
2248 seq_printf(s, "dsps\t\t%ld\t%ld\n",
2249 (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
2250 (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
2251 seq_printf(s, "bops\t\t%ld\t%ld\n",
2252 (unsigned long)__this_cpu_read(mipsr2emustats.bops),
2253 (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
2254 seq_printf(s, "traps\t\t%ld\t%ld\n",
2255 (unsigned long)__this_cpu_read(mipsr2emustats.traps),
2256 (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
2257 seq_printf(s, "fpus\t\t%ld\t%ld\n",
2258 (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
2259 (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
2260 seq_printf(s, "loads\t\t%ld\t%ld\n",
2261 (unsigned long)__this_cpu_read(mipsr2emustats.loads),
2262 (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
2263 seq_printf(s, "stores\t\t%ld\t%ld\n",
2264 (unsigned long)__this_cpu_read(mipsr2emustats.stores),
2265 (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
2266 seq_printf(s, "llsc\t\t%ld\t%ld\n",
2267 (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
2268 (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
2269 seq_printf(s, "dsemul\t\t%ld\t%ld\n",
2270 (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
2271 (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
2272 seq_printf(s, "jr\t\t%ld\n",
2273 (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
2274 seq_printf(s, "bltzl\t\t%ld\n",
2275 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
2276 seq_printf(s, "bgezl\t\t%ld\n",
2277 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
2278 seq_printf(s, "bltzll\t\t%ld\n",
2279 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
2280 seq_printf(s, "bgezll\t\t%ld\n",
2281 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
2282 seq_printf(s, "bltzal\t\t%ld\n",
2283 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
2284 seq_printf(s, "bgezal\t\t%ld\n",
2285 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
2286 seq_printf(s, "beql\t\t%ld\n",
2287 (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
2288 seq_printf(s, "bnel\t\t%ld\n",
2289 (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
2290 seq_printf(s, "blezl\t\t%ld\n",
2291 (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
2292 seq_printf(s, "bgtzl\t\t%ld\n",
2293 (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
2294
2295 return 0;
2296 }
2297
2298 static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
2299 {
2300 mipsr2_stats_show(s, unused);
2301
2302 __this_cpu_write((mipsr2emustats).movs, 0);
2303 __this_cpu_write((mipsr2bdemustats).movs, 0);
2304 __this_cpu_write((mipsr2emustats).hilo, 0);
2305 __this_cpu_write((mipsr2bdemustats).hilo, 0);
2306 __this_cpu_write((mipsr2emustats).muls, 0);
2307 __this_cpu_write((mipsr2bdemustats).muls, 0);
2308 __this_cpu_write((mipsr2emustats).divs, 0);
2309 __this_cpu_write((mipsr2bdemustats).divs, 0);
2310 __this_cpu_write((mipsr2emustats).dsps, 0);
2311 __this_cpu_write((mipsr2bdemustats).dsps, 0);
2312 __this_cpu_write((mipsr2emustats).bops, 0);
2313 __this_cpu_write((mipsr2bdemustats).bops, 0);
2314 __this_cpu_write((mipsr2emustats).traps, 0);
2315 __this_cpu_write((mipsr2bdemustats).traps, 0);
2316 __this_cpu_write((mipsr2emustats).fpus, 0);
2317 __this_cpu_write((mipsr2bdemustats).fpus, 0);
2318 __this_cpu_write((mipsr2emustats).loads, 0);
2319 __this_cpu_write((mipsr2bdemustats).loads, 0);
2320 __this_cpu_write((mipsr2emustats).stores, 0);
2321 __this_cpu_write((mipsr2bdemustats).stores, 0);
2322 __this_cpu_write((mipsr2emustats).llsc, 0);
2323 __this_cpu_write((mipsr2bdemustats).llsc, 0);
2324 __this_cpu_write((mipsr2emustats).dsemul, 0);
2325 __this_cpu_write((mipsr2bdemustats).dsemul, 0);
2326 __this_cpu_write((mipsr2bremustats).jrs, 0);
2327 __this_cpu_write((mipsr2bremustats).bltzl, 0);
2328 __this_cpu_write((mipsr2bremustats).bgezl, 0);
2329 __this_cpu_write((mipsr2bremustats).bltzll, 0);
2330 __this_cpu_write((mipsr2bremustats).bgezll, 0);
2331 __this_cpu_write((mipsr2bremustats).bltzal, 0);
2332 __this_cpu_write((mipsr2bremustats).bgezal, 0);
2333 __this_cpu_write((mipsr2bremustats).beql, 0);
2334 __this_cpu_write((mipsr2bremustats).bnel, 0);
2335 __this_cpu_write((mipsr2bremustats).blezl, 0);
2336 __this_cpu_write((mipsr2bremustats).bgtzl, 0);
2337
2338 return 0;
2339 }
2340
2341 static int mipsr2_stats_open(struct inode *inode, struct file *file)
2342 {
2343 return single_open(file, mipsr2_stats_show, inode->i_private);
2344 }
2345
2346 static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
2347 {
2348 return single_open(file, mipsr2_stats_clear_show, inode->i_private);
2349 }
2350
2351 static const struct file_operations mipsr2_emul_fops = {
2352 .open = mipsr2_stats_open,
2353 .read = seq_read,
2354 .llseek = seq_lseek,
2355 .release = single_release,
2356 };
2357
2358 static const struct file_operations mipsr2_clear_fops = {
2359 .open = mipsr2_stats_clear_open,
2360 .read = seq_read,
2361 .llseek = seq_lseek,
2362 .release = single_release,
2363 };
2364
2365
2366 static int __init mipsr2_init_debugfs(void)
2367 {
2368 struct dentry *mipsr2_emul;
2369
2370 if (!mips_debugfs_dir)
2371 return -ENODEV;
2372
2373 mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
2374 mips_debugfs_dir, NULL,
2375 &mipsr2_emul_fops);
2376 if (!mipsr2_emul)
2377 return -ENOMEM;
2378
2379 mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
2380 mips_debugfs_dir, NULL,
2381 &mipsr2_clear_fops);
2382 if (!mipsr2_emul)
2383 return -ENOMEM;
2384
2385 return 0;
2386 }
2387
2388 device_initcall(mipsr2_init_debugfs);
2389
2390 #endif /* CONFIG_DEBUG_FS */