]> git.proxmox.com Git - qemu.git/blob - target-xtensa/translate.c
target-xtensa: implement RST2 group (32 bit mul/div/rem)
[qemu.git] / target-xtensa / translate.c
1 /*
2 * Xtensa ISA:
3 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
4 *
5 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of the Open Source and Linux Lab nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <stdio.h>
32
33 #include "cpu.h"
34 #include "exec-all.h"
35 #include "disas.h"
36 #include "tcg-op.h"
37 #include "qemu-log.h"
38
39 #include "helpers.h"
40 #define GEN_HELPER 1
41 #include "helpers.h"
42
/*
 * Per-translation-block decoder state, reset for each block that is
 * translated.
 */
typedef struct DisasContext {
    const XtensaConfig *config;     /* CPU configuration being translated */
    TranslationBlock *tb;
    uint32_t pc;                    /* address of the current instruction */
    uint32_t next_pc;               /* address of the following instruction */
    int cring;                      /* NOTE(review): ring used for data access
                                       (passed to qemu_ld/st) — set outside
                                       this chunk, confirm at caller */
    int ring;                       /* NOTE(review): current privilege ring —
                                       set outside this chunk */
    int is_jmp;                     /* DISAS_* reason translation stopped */
    int singlestep_enabled;

    /*
     * Lazy model of the SAR special register:
     *   sar_5bit          - SAR is known to hold a value in 0..31
     *   sar_m32_5bit      - sar_m32 holds 32 - SAR, itself in 0..31
     *   sar_m32_allocated - sar_m32 temp exists and must be freed
     */
    bool sar_5bit;
    bool sar_m32_5bit;
    bool sar_m32_allocated;
    TCGv_i32 sar_m32;
} DisasContext;
58
/* TCG globals backing the translated CPU state (created in
 * xtensa_translate_init). cpu_SR/cpu_UR entries remain NULL for
 * registers that have no name in sregnames/uregnames. */
static TCGv_ptr cpu_env;
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_R[16];      /* AR general-purpose registers */
static TCGv_i32 cpu_SR[256];    /* special registers */
static TCGv_i32 cpu_UR[256];    /* user registers */

#include "gen-icount.h"
66
/*
 * Names of the implemented special registers.  A NULL entry marks an
 * unimplemented register: no TCG global is created for it and
 * gen_rsr/gen_wsr only log accesses to it.
 */
static const char * const sregnames[256] = {
    [SAR] = "SAR",
    [SCOMPARE1] = "SCOMPARE1",
    [EPC1] = "EPC1",
    [DEPC] = "DEPC",
    [EXCSAVE1] = "EXCSAVE1",
    [PS] = "PS",
    [EXCCAUSE] = "EXCCAUSE",
    [EXCVADDR] = "EXCVADDR",
};
77
/* Names of the implemented user registers; NULL entries are
 * unimplemented (RUR/WUR on them only log and hit TBD()). */
static const char * const uregnames[256] = {
    [THREADPTR] = "THREADPTR",
    [FCR] = "FCR",
    [FSR] = "FSR",
};
83
/*
 * Create the TCG globals that mirror the CPU state: the pc, the 16 AR
 * registers, and one global per *named* special/user register (unnamed
 * SR/UR slots keep a NULL cpu_SR/cpu_UR entry and are rejected at
 * translation time).  Also instantiates the generated helper stubs via
 * the GEN_HELPER 2 include.
 */
void xtensa_translate_init(void)
{
    static const char * const regnames[] = {
        "ar0", "ar1", "ar2", "ar3",
        "ar4", "ar5", "ar6", "ar7",
        "ar8", "ar9", "ar10", "ar11",
        "ar12", "ar13", "ar14", "ar15",
    };
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
            offsetof(CPUState, pc), "pc");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                offsetof(CPUState, regs[i]),
                regnames[i]);
    }

    /* only registers with a name become TCG globals */
    for (i = 0; i < 256; ++i) {
        if (sregnames[i]) {
            cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
                    offsetof(CPUState, sregs[i]),
                    sregnames[i]);
        }
    }

    for (i = 0; i < 256; ++i) {
        if (uregnames[i]) {
            cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
                    offsetof(CPUState, uregs[i]),
                    uregnames[i]);
        }
    }
#define GEN_HELPER 2
#include "helpers.h"
}
122
123 static inline bool option_enabled(DisasContext *dc, int opt)
124 {
125 return xtensa_option_enabled(dc->config, opt);
126 }
127
128 static void init_sar_tracker(DisasContext *dc)
129 {
130 dc->sar_5bit = false;
131 dc->sar_m32_5bit = false;
132 dc->sar_m32_allocated = false;
133 }
134
135 static void reset_sar_tracker(DisasContext *dc)
136 {
137 if (dc->sar_m32_allocated) {
138 tcg_temp_free(dc->sar_m32);
139 }
140 }
141
142 static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
143 {
144 tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
145 if (dc->sar_m32_5bit) {
146 tcg_gen_discard_i32(dc->sar_m32);
147 }
148 dc->sar_5bit = true;
149 dc->sar_m32_5bit = false;
150 }
151
152 static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
153 {
154 TCGv_i32 tmp = tcg_const_i32(32);
155 if (!dc->sar_m32_allocated) {
156 dc->sar_m32 = tcg_temp_local_new_i32();
157 dc->sar_m32_allocated = true;
158 }
159 tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
160 tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
161 dc->sar_5bit = false;
162 dc->sar_m32_5bit = true;
163 tcg_temp_free(tmp);
164 }
165
166 static void gen_exception(int excp)
167 {
168 TCGv_i32 tmp = tcg_const_i32(excp);
169 gen_helper_exception(tmp);
170 tcg_temp_free(tmp);
171 }
172
173 static void gen_exception_cause(DisasContext *dc, uint32_t cause)
174 {
175 TCGv_i32 tpc = tcg_const_i32(dc->pc);
176 TCGv_i32 tcause = tcg_const_i32(cause);
177 gen_helper_exception_cause(tpc, tcause);
178 tcg_temp_free(tpc);
179 tcg_temp_free(tcause);
180 }
181
182 static void gen_check_privilege(DisasContext *dc)
183 {
184 if (dc->cring) {
185 gen_exception_cause(dc, PRIVILEGED_CAUSE);
186 }
187 }
188
/*
 * Emit a jump to the address held in 'dest'.  'slot' selects a goto_tb
 * chaining slot (0 or 1) for direct-linked jumps, or -1 to force a
 * full exit back to the main loop (computed targets, or state changes
 * that must be re-evaluated).  Under single-stepping every jump exits
 * via EXCP_DEBUG instead of chaining.
 */
static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
{
    tcg_gen_mov_i32(cpu_pc, dest);
    if (dc->singlestep_enabled) {
        gen_exception(EXCP_DEBUG);
    } else {
        if (slot >= 0) {
            tcg_gen_goto_tb(slot);
            /* exit_tb value = tb pointer with the slot in the low bits */
            tcg_gen_exit_tb((tcg_target_long)dc->tb + slot);
        } else {
            tcg_gen_exit_tb(0);
        }
    }
    dc->is_jmp = DISAS_UPDATE;
}
204
/* Jump to a computed target with no TB chaining. */
static void gen_jump(DisasContext *dc, TCGv dest)
{
    gen_jump_slot(dc, dest, -1);
}
209
210 static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
211 {
212 TCGv_i32 tmp = tcg_const_i32(dest);
213 if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
214 slot = -1;
215 }
216 gen_jump_slot(dc, tmp, slot);
217 tcg_temp_free(tmp);
218 }
219
220 static void gen_brcond(DisasContext *dc, TCGCond cond,
221 TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
222 {
223 int label = gen_new_label();
224
225 tcg_gen_brcond_i32(cond, t0, t1, label);
226 gen_jumpi(dc, dc->next_pc, 0);
227 gen_set_label(label);
228 gen_jumpi(dc, dc->pc + offset, 1);
229 }
230
231 static void gen_brcondi(DisasContext *dc, TCGCond cond,
232 TCGv_i32 t0, uint32_t t1, uint32_t offset)
233 {
234 TCGv_i32 tmp = tcg_const_i32(t1);
235 gen_brcond(dc, cond, t0, tmp, offset);
236 tcg_temp_free(tmp);
237 }
238
/*
 * Read special register 'sr' into 'd'.  Registers that need more than
 * a plain move can install a handler in rsr_handler (the table is
 * empty so far); registers without a name in sregnames are
 * unimplemented and only logged.
 */
static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    static void (* const rsr_handler[256])(DisasContext *dc,
            TCGv_i32 d, uint32_t sr) = {
    };

    if (sregnames[sr]) {
        if (rsr_handler[sr]) {
            rsr_handler[sr](dc, d, sr);
        } else {
            tcg_gen_mov_i32(d, cpu_SR[sr]);
        }
    } else {
        qemu_log("RSR %d not implemented, ", sr);
    }
}
255
256 static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
257 {
258 tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
259 if (dc->sar_m32_5bit) {
260 tcg_gen_discard_i32(dc->sar_m32);
261 }
262 dc->sar_5bit = false;
263 dc->sar_m32_5bit = false;
264 }
265
266 static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
267 {
268 uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
269 PS_UM | PS_EXCM | PS_INTLEVEL;
270
271 if (option_enabled(dc, XTENSA_OPTION_MMU)) {
272 mask |= PS_RING;
273 }
274 tcg_gen_andi_i32(cpu_SR[sr], v, mask);
275 /* This can change mmu index, so exit tb */
276 gen_jumpi(dc, dc->next_pc, -1);
277 }
278
/*
 * Write 's' to special register 'sr'.  Registers with side effects
 * dispatch through wsr_handler (currently SAR and PS); other named
 * registers get a plain move, and unnamed (unimplemented) registers
 * are only logged.
 */
static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    static void (* const wsr_handler[256])(DisasContext *dc,
            uint32_t sr, TCGv_i32 v) = {
        [SAR] = gen_wsr_sar,
        [PS] = gen_wsr_ps,
    };

    if (sregnames[sr]) {
        if (wsr_handler[sr]) {
            wsr_handler[sr](dc, sr, s);
        } else {
            tcg_gen_mov_i32(cpu_SR[sr], s);
        }
    } else {
        qemu_log("WSR %d not implemented, ", sr);
    }
}
297
298 static void disas_xtensa_insn(DisasContext *dc)
299 {
300 #define HAS_OPTION(opt) do { \
301 if (!option_enabled(dc, opt)) { \
302 qemu_log("Option %d is not enabled %s:%d\n", \
303 (opt), __FILE__, __LINE__); \
304 goto invalid_opcode; \
305 } \
306 } while (0)
307
308 #define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
309 #define RESERVED() do { \
310 qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
311 dc->pc, b0, b1, b2, __FILE__, __LINE__); \
312 goto invalid_opcode; \
313 } while (0)
314
315
316 #ifdef TARGET_WORDS_BIGENDIAN
317 #define OP0 (((b0) & 0xf0) >> 4)
318 #define OP1 (((b2) & 0xf0) >> 4)
319 #define OP2 ((b2) & 0xf)
320 #define RRR_R ((b1) & 0xf)
321 #define RRR_S (((b1) & 0xf0) >> 4)
322 #define RRR_T ((b0) & 0xf)
323 #else
324 #define OP0 (((b0) & 0xf))
325 #define OP1 (((b2) & 0xf))
326 #define OP2 (((b2) & 0xf0) >> 4)
327 #define RRR_R (((b1) & 0xf0) >> 4)
328 #define RRR_S (((b1) & 0xf))
329 #define RRR_T (((b0) & 0xf0) >> 4)
330 #endif
331
332 #define RRRN_R RRR_R
333 #define RRRN_S RRR_S
334 #define RRRN_T RRR_T
335
336 #define RRI8_R RRR_R
337 #define RRI8_S RRR_S
338 #define RRI8_T RRR_T
339 #define RRI8_IMM8 (b2)
340 #define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8)
341
342 #ifdef TARGET_WORDS_BIGENDIAN
343 #define RI16_IMM16 (((b1) << 8) | (b2))
344 #else
345 #define RI16_IMM16 (((b2) << 8) | (b1))
346 #endif
347
348 #ifdef TARGET_WORDS_BIGENDIAN
349 #define CALL_N (((b0) & 0xc) >> 2)
350 #define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2))
351 #else
352 #define CALL_N (((b0) & 0x30) >> 4)
353 #define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10))
354 #endif
355 #define CALL_OFFSET_SE \
356 (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET)
357
358 #define CALLX_N CALL_N
359 #ifdef TARGET_WORDS_BIGENDIAN
360 #define CALLX_M ((b0) & 0x3)
361 #else
362 #define CALLX_M (((b0) & 0xc0) >> 6)
363 #endif
364 #define CALLX_S RRR_S
365
366 #define BRI12_M CALLX_M
367 #define BRI12_S RRR_S
368 #ifdef TARGET_WORDS_BIGENDIAN
369 #define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2))
370 #else
371 #define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4))
372 #endif
373 #define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12)
374
375 #define BRI8_M BRI12_M
376 #define BRI8_R RRI8_R
377 #define BRI8_S RRI8_S
378 #define BRI8_IMM8 RRI8_IMM8
379 #define BRI8_IMM8_SE RRI8_IMM8_SE
380
381 #define RSR_SR (b1)
382
383 uint8_t b0 = ldub_code(dc->pc);
384 uint8_t b1 = ldub_code(dc->pc + 1);
385 uint8_t b2 = ldub_code(dc->pc + 2);
386
387 static const uint32_t B4CONST[] = {
388 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
389 };
390
391 static const uint32_t B4CONSTU[] = {
392 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
393 };
394
395 if (OP0 >= 8) {
396 dc->next_pc = dc->pc + 2;
397 HAS_OPTION(XTENSA_OPTION_CODE_DENSITY);
398 } else {
399 dc->next_pc = dc->pc + 3;
400 }
401
402 switch (OP0) {
403 case 0: /*QRST*/
404 switch (OP1) {
405 case 0: /*RST0*/
406 switch (OP2) {
407 case 0: /*ST0*/
408 if ((RRR_R & 0xc) == 0x8) {
409 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
410 }
411
412 switch (RRR_R) {
413 case 0: /*SNM0*/
414 switch (CALLX_M) {
415 case 0: /*ILL*/
416 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
417 break;
418
419 case 1: /*reserved*/
420 RESERVED();
421 break;
422
423 case 2: /*JR*/
424 switch (CALLX_N) {
425 case 0: /*RET*/
426 case 2: /*JX*/
427 gen_jump(dc, cpu_R[CALLX_S]);
428 break;
429
430 case 1: /*RETWw*/
431 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
432 TBD();
433 break;
434
435 case 3: /*reserved*/
436 RESERVED();
437 break;
438 }
439 break;
440
441 case 3: /*CALLX*/
442 switch (CALLX_N) {
443 case 0: /*CALLX0*/
444 {
445 TCGv_i32 tmp = tcg_temp_new_i32();
446 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
447 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
448 gen_jump(dc, tmp);
449 tcg_temp_free(tmp);
450 }
451 break;
452
453 case 1: /*CALLX4w*/
454 case 2: /*CALLX8w*/
455 case 3: /*CALLX12w*/
456 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
457 TBD();
458 break;
459 }
460 break;
461 }
462 break;
463
464 case 1: /*MOVSPw*/
465 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
466 TBD();
467 break;
468
469 case 2: /*SYNC*/
470 switch (RRR_T) {
471 case 0: /*ISYNC*/
472 break;
473
474 case 1: /*RSYNC*/
475 break;
476
477 case 2: /*ESYNC*/
478 break;
479
480 case 3: /*DSYNC*/
481 break;
482
483 case 8: /*EXCW*/
484 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
485 break;
486
487 case 12: /*MEMW*/
488 break;
489
490 case 13: /*EXTW*/
491 break;
492
493 case 15: /*NOP*/
494 break;
495
496 default: /*reserved*/
497 RESERVED();
498 break;
499 }
500 break;
501
502 case 3: /*RFEIx*/
503 switch (RRR_T) {
504 case 0: /*RFETx*/
505 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
506 switch (RRR_S) {
507 case 0: /*RFEx*/
508 gen_check_privilege(dc);
509 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
510 gen_jump(dc, cpu_SR[EPC1]);
511 break;
512
513 case 1: /*RFUEx*/
514 RESERVED();
515 break;
516
517 case 2: /*RFDEx*/
518 gen_check_privilege(dc);
519 gen_jump(dc, cpu_SR[
520 dc->config->ndepc ? DEPC : EPC1]);
521 break;
522
523 case 4: /*RFWOw*/
524 case 5: /*RFWUw*/
525 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
526 TBD();
527 break;
528
529 default: /*reserved*/
530 RESERVED();
531 break;
532 }
533 break;
534
535 case 1: /*RFIx*/
536 HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT);
537 TBD();
538 break;
539
540 case 2: /*RFME*/
541 TBD();
542 break;
543
544 default: /*reserved*/
545 RESERVED();
546 break;
547
548 }
549 break;
550
551 case 4: /*BREAKx*/
552 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
553 TBD();
554 break;
555
556 case 5: /*SYSCALLx*/
557 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
558 switch (RRR_S) {
559 case 0: /*SYSCALLx*/
560 gen_exception_cause(dc, SYSCALL_CAUSE);
561 break;
562
563 case 1: /*SIMCALL*/
564 TBD();
565 break;
566
567 default:
568 RESERVED();
569 break;
570 }
571 break;
572
573 case 6: /*RSILx*/
574 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
575 gen_check_privilege(dc);
576 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]);
577 tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S);
578 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS],
579 RRR_S | ~PS_INTLEVEL);
580 break;
581
582 case 7: /*WAITIx*/
583 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
584 TBD();
585 break;
586
587 case 8: /*ANY4p*/
588 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
589 TBD();
590 break;
591
592 case 9: /*ALL4p*/
593 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
594 TBD();
595 break;
596
597 case 10: /*ANY8p*/
598 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
599 TBD();
600 break;
601
602 case 11: /*ALL8p*/
603 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
604 TBD();
605 break;
606
607 default: /*reserved*/
608 RESERVED();
609 break;
610
611 }
612 break;
613
614 case 1: /*AND*/
615 tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
616 break;
617
618 case 2: /*OR*/
619 tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
620 break;
621
622 case 3: /*XOR*/
623 tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
624 break;
625
626 case 4: /*ST1*/
627 switch (RRR_R) {
628 case 0: /*SSR*/
629 gen_right_shift_sar(dc, cpu_R[RRR_S]);
630 break;
631
632 case 1: /*SSL*/
633 gen_left_shift_sar(dc, cpu_R[RRR_S]);
634 break;
635
636 case 2: /*SSA8L*/
637 {
638 TCGv_i32 tmp = tcg_temp_new_i32();
639 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
640 gen_right_shift_sar(dc, tmp);
641 tcg_temp_free(tmp);
642 }
643 break;
644
645 case 3: /*SSA8B*/
646 {
647 TCGv_i32 tmp = tcg_temp_new_i32();
648 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
649 gen_left_shift_sar(dc, tmp);
650 tcg_temp_free(tmp);
651 }
652 break;
653
654 case 4: /*SSAI*/
655 {
656 TCGv_i32 tmp = tcg_const_i32(
657 RRR_S | ((RRR_T & 1) << 4));
658 gen_right_shift_sar(dc, tmp);
659 tcg_temp_free(tmp);
660 }
661 break;
662
663 case 6: /*RER*/
664 TBD();
665 break;
666
667 case 7: /*WER*/
668 TBD();
669 break;
670
671 case 8: /*ROTWw*/
672 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
673 TBD();
674 break;
675
676 case 14: /*NSAu*/
677 HAS_OPTION(XTENSA_OPTION_MISC_OP);
678 gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
679 break;
680
681 case 15: /*NSAUu*/
682 HAS_OPTION(XTENSA_OPTION_MISC_OP);
683 gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
684 break;
685
686 default: /*reserved*/
687 RESERVED();
688 break;
689 }
690 break;
691
692 case 5: /*TLB*/
693 TBD();
694 break;
695
696 case 6: /*RT0*/
697 switch (RRR_S) {
698 case 0: /*NEG*/
699 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
700 break;
701
702 case 1: /*ABS*/
703 {
704 int label = gen_new_label();
705 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
706 tcg_gen_brcondi_i32(
707 TCG_COND_GE, cpu_R[RRR_R], 0, label);
708 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
709 gen_set_label(label);
710 }
711 break;
712
713 default: /*reserved*/
714 RESERVED();
715 break;
716 }
717 break;
718
719 case 7: /*reserved*/
720 RESERVED();
721 break;
722
723 case 8: /*ADD*/
724 tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
725 break;
726
727 case 9: /*ADD**/
728 case 10:
729 case 11:
730 {
731 TCGv_i32 tmp = tcg_temp_new_i32();
732 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
733 tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
734 tcg_temp_free(tmp);
735 }
736 break;
737
738 case 12: /*SUB*/
739 tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
740 break;
741
742 case 13: /*SUB**/
743 case 14:
744 case 15:
745 {
746 TCGv_i32 tmp = tcg_temp_new_i32();
747 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
748 tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
749 tcg_temp_free(tmp);
750 }
751 break;
752 }
753 break;
754
755 case 1: /*RST1*/
756 switch (OP2) {
757 case 0: /*SLLI*/
758 case 1:
759 tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S],
760 32 - (RRR_T | ((OP2 & 1) << 4)));
761 break;
762
763 case 2: /*SRAI*/
764 case 3:
765 tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T],
766 RRR_S | ((OP2 & 1) << 4));
767 break;
768
769 case 4: /*SRLI*/
770 tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S);
771 break;
772
773 case 6: /*XSR*/
774 {
775 TCGv_i32 tmp = tcg_temp_new_i32();
776 if (RSR_SR >= 64) {
777 gen_check_privilege(dc);
778 }
779 tcg_gen_mov_i32(tmp, cpu_R[RRR_T]);
780 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
781 gen_wsr(dc, RSR_SR, tmp);
782 tcg_temp_free(tmp);
783 if (!sregnames[RSR_SR]) {
784 TBD();
785 }
786 }
787 break;
788
789 /*
790 * Note: 64 bit ops are used here solely because SAR values
791 * have range 0..63
792 */
793 #define gen_shift_reg(cmd, reg) do { \
794 TCGv_i64 tmp = tcg_temp_new_i64(); \
795 tcg_gen_extu_i32_i64(tmp, reg); \
796 tcg_gen_##cmd##_i64(v, v, tmp); \
797 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
798 tcg_temp_free_i64(v); \
799 tcg_temp_free_i64(tmp); \
800 } while (0)
801
802 #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
803
804 case 8: /*SRC*/
805 {
806 TCGv_i64 v = tcg_temp_new_i64();
807 tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]);
808 gen_shift(shr);
809 }
810 break;
811
812 case 9: /*SRL*/
813 if (dc->sar_5bit) {
814 tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
815 } else {
816 TCGv_i64 v = tcg_temp_new_i64();
817 tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]);
818 gen_shift(shr);
819 }
820 break;
821
822 case 10: /*SLL*/
823 if (dc->sar_m32_5bit) {
824 tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32);
825 } else {
826 TCGv_i64 v = tcg_temp_new_i64();
827 TCGv_i32 s = tcg_const_i32(32);
828 tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
829 tcg_gen_andi_i32(s, s, 0x3f);
830 tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]);
831 gen_shift_reg(shl, s);
832 tcg_temp_free(s);
833 }
834 break;
835
836 case 11: /*SRA*/
837 if (dc->sar_5bit) {
838 tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
839 } else {
840 TCGv_i64 v = tcg_temp_new_i64();
841 tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]);
842 gen_shift(sar);
843 }
844 break;
845 #undef gen_shift
846 #undef gen_shift_reg
847
848 case 12: /*MUL16U*/
849 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
850 {
851 TCGv_i32 v1 = tcg_temp_new_i32();
852 TCGv_i32 v2 = tcg_temp_new_i32();
853 tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]);
854 tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]);
855 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
856 tcg_temp_free(v2);
857 tcg_temp_free(v1);
858 }
859 break;
860
861 case 13: /*MUL16S*/
862 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
863 {
864 TCGv_i32 v1 = tcg_temp_new_i32();
865 TCGv_i32 v2 = tcg_temp_new_i32();
866 tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]);
867 tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]);
868 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
869 tcg_temp_free(v2);
870 tcg_temp_free(v1);
871 }
872 break;
873
874 default: /*reserved*/
875 RESERVED();
876 break;
877 }
878 break;
879
880 case 2: /*RST2*/
881 if (OP2 >= 12) {
882 HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV);
883 int label = gen_new_label();
884 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
885 gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
886 gen_set_label(label);
887 }
888
889 switch (OP2) {
890 case 8: /*MULLi*/
891 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL);
892 tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
893 break;
894
895 case 10: /*MULUHi*/
896 case 11: /*MULSHi*/
897 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL);
898 {
899 TCGv_i64 r = tcg_temp_new_i64();
900 TCGv_i64 s = tcg_temp_new_i64();
901 TCGv_i64 t = tcg_temp_new_i64();
902
903 if (OP2 == 10) {
904 tcg_gen_extu_i32_i64(s, cpu_R[RRR_S]);
905 tcg_gen_extu_i32_i64(t, cpu_R[RRR_T]);
906 } else {
907 tcg_gen_ext_i32_i64(s, cpu_R[RRR_S]);
908 tcg_gen_ext_i32_i64(t, cpu_R[RRR_T]);
909 }
910 tcg_gen_mul_i64(r, s, t);
911 tcg_gen_shri_i64(r, r, 32);
912 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], r);
913
914 tcg_temp_free_i64(r);
915 tcg_temp_free_i64(s);
916 tcg_temp_free_i64(t);
917 }
918 break;
919
920 case 12: /*QUOUi*/
921 tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
922 break;
923
924 case 13: /*QUOSi*/
925 case 15: /*REMSi*/
926 {
927 int label1 = gen_new_label();
928 int label2 = gen_new_label();
929
930 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
931 label1);
932 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
933 label1);
934 tcg_gen_movi_i32(cpu_R[RRR_R],
935 OP2 == 13 ? 0x80000000 : 0);
936 tcg_gen_br(label2);
937 gen_set_label(label1);
938 if (OP2 == 13) {
939 tcg_gen_div_i32(cpu_R[RRR_R],
940 cpu_R[RRR_S], cpu_R[RRR_T]);
941 } else {
942 tcg_gen_rem_i32(cpu_R[RRR_R],
943 cpu_R[RRR_S], cpu_R[RRR_T]);
944 }
945 gen_set_label(label2);
946 }
947 break;
948
949 case 14: /*REMUi*/
950 tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
951 break;
952
953 default: /*reserved*/
954 RESERVED();
955 break;
956 }
957 break;
958
959 case 3: /*RST3*/
960 switch (OP2) {
961 case 0: /*RSR*/
962 if (RSR_SR >= 64) {
963 gen_check_privilege(dc);
964 }
965 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
966 if (!sregnames[RSR_SR]) {
967 TBD();
968 }
969 break;
970
971 case 1: /*WSR*/
972 if (RSR_SR >= 64) {
973 gen_check_privilege(dc);
974 }
975 gen_wsr(dc, RSR_SR, cpu_R[RRR_T]);
976 if (!sregnames[RSR_SR]) {
977 TBD();
978 }
979 break;
980
981 case 2: /*SEXTu*/
982 HAS_OPTION(XTENSA_OPTION_MISC_OP);
983 {
984 int shift = 24 - RRR_T;
985
986 if (shift == 24) {
987 tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
988 } else if (shift == 16) {
989 tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
990 } else {
991 TCGv_i32 tmp = tcg_temp_new_i32();
992 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift);
993 tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift);
994 tcg_temp_free(tmp);
995 }
996 }
997 break;
998
999 case 3: /*CLAMPSu*/
1000 HAS_OPTION(XTENSA_OPTION_MISC_OP);
1001 {
1002 TCGv_i32 tmp1 = tcg_temp_new_i32();
1003 TCGv_i32 tmp2 = tcg_temp_new_i32();
1004 int label = gen_new_label();
1005
1006 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
1007 tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]);
1008 tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7));
1009 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1010 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp2, 0, label);
1011
1012 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
1013 tcg_gen_xori_i32(cpu_R[RRR_R], tmp1,
1014 0xffffffff >> (25 - RRR_T));
1015
1016 gen_set_label(label);
1017
1018 tcg_temp_free(tmp1);
1019 tcg_temp_free(tmp2);
1020 }
1021 break;
1022
1023 case 4: /*MINu*/
1024 case 5: /*MAXu*/
1025 case 6: /*MINUu*/
1026 case 7: /*MAXUu*/
1027 HAS_OPTION(XTENSA_OPTION_MISC_OP);
1028 {
1029 static const TCGCond cond[] = {
1030 TCG_COND_LE,
1031 TCG_COND_GE,
1032 TCG_COND_LEU,
1033 TCG_COND_GEU
1034 };
1035 int label = gen_new_label();
1036
1037 if (RRR_R != RRR_T) {
1038 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1039 tcg_gen_brcond_i32(cond[OP2 - 4],
1040 cpu_R[RRR_S], cpu_R[RRR_T], label);
1041 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1042 } else {
1043 tcg_gen_brcond_i32(cond[OP2 - 4],
1044 cpu_R[RRR_T], cpu_R[RRR_S], label);
1045 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1046 }
1047 gen_set_label(label);
1048 }
1049 break;
1050
1051 case 8: /*MOVEQZ*/
1052 case 9: /*MOVNEZ*/
1053 case 10: /*MOVLTZ*/
1054 case 11: /*MOVGEZ*/
1055 {
1056 static const TCGCond cond[] = {
1057 TCG_COND_NE,
1058 TCG_COND_EQ,
1059 TCG_COND_GE,
1060 TCG_COND_LT
1061 };
1062 int label = gen_new_label();
1063 tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label);
1064 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1065 gen_set_label(label);
1066 }
1067 break;
1068
1069 case 12: /*MOVFp*/
1070 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1071 TBD();
1072 break;
1073
1074 case 13: /*MOVTp*/
1075 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1076 TBD();
1077 break;
1078
1079 case 14: /*RUR*/
1080 {
1081 int st = (RRR_S << 4) + RRR_T;
1082 if (uregnames[st]) {
1083 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
1084 } else {
1085 qemu_log("RUR %d not implemented, ", st);
1086 TBD();
1087 }
1088 }
1089 break;
1090
1091 case 15: /*WUR*/
1092 {
1093 if (uregnames[RSR_SR]) {
1094 tcg_gen_mov_i32(cpu_UR[RSR_SR], cpu_R[RRR_T]);
1095 } else {
1096 qemu_log("WUR %d not implemented, ", RSR_SR);
1097 TBD();
1098 }
1099 }
1100 break;
1101
1102 }
1103 break;
1104
1105 case 4: /*EXTUI*/
1106 case 5:
1107 {
1108 int shiftimm = RRR_S | (OP1 << 4);
1109 int maskimm = (1 << (OP2 + 1)) - 1;
1110
1111 TCGv_i32 tmp = tcg_temp_new_i32();
1112 tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm);
1113 tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm);
1114 tcg_temp_free(tmp);
1115 }
1116 break;
1117
1118 case 6: /*CUST0*/
1119 RESERVED();
1120 break;
1121
1122 case 7: /*CUST1*/
1123 RESERVED();
1124 break;
1125
1126 case 8: /*LSCXp*/
1127 HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
1128 TBD();
1129 break;
1130
1131 case 9: /*LSC4*/
1132 TBD();
1133 break;
1134
1135 case 10: /*FP0*/
1136 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1137 TBD();
1138 break;
1139
1140 case 11: /*FP1*/
1141 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1142 TBD();
1143 break;
1144
1145 default: /*reserved*/
1146 RESERVED();
1147 break;
1148 }
1149 break;
1150
1151 case 1: /*L32R*/
1152 {
1153 TCGv_i32 tmp = tcg_const_i32(
1154 (0xfffc0000 | (RI16_IMM16 << 2)) +
1155 ((dc->pc + 3) & ~3));
1156
1157 /* no ext L32R */
1158
1159 tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring);
1160 tcg_temp_free(tmp);
1161 }
1162 break;
1163
1164 case 2: /*LSAI*/
1165 #define gen_load_store(type, shift) do { \
1166 TCGv_i32 addr = tcg_temp_new_i32(); \
1167 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
1168 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
1169 tcg_temp_free(addr); \
1170 } while (0)
1171
1172 switch (RRI8_R) {
1173 case 0: /*L8UI*/
1174 gen_load_store(ld8u, 0);
1175 break;
1176
1177 case 1: /*L16UI*/
1178 gen_load_store(ld16u, 1);
1179 break;
1180
1181 case 2: /*L32I*/
1182 gen_load_store(ld32u, 2);
1183 break;
1184
1185 case 4: /*S8I*/
1186 gen_load_store(st8, 0);
1187 break;
1188
1189 case 5: /*S16I*/
1190 gen_load_store(st16, 1);
1191 break;
1192
1193 case 6: /*S32I*/
1194 gen_load_store(st32, 2);
1195 break;
1196
1197 case 7: /*CACHEc*/
1198 if (RRI8_T < 8) {
1199 HAS_OPTION(XTENSA_OPTION_DCACHE);
1200 }
1201
1202 switch (RRI8_T) {
1203 case 0: /*DPFRc*/
1204 break;
1205
1206 case 1: /*DPFWc*/
1207 break;
1208
1209 case 2: /*DPFROc*/
1210 break;
1211
1212 case 3: /*DPFWOc*/
1213 break;
1214
1215 case 4: /*DHWBc*/
1216 break;
1217
1218 case 5: /*DHWBIc*/
1219 break;
1220
1221 case 6: /*DHIc*/
1222 break;
1223
1224 case 7: /*DIIc*/
1225 break;
1226
1227 case 8: /*DCEc*/
1228 switch (OP1) {
1229 case 0: /*DPFLl*/
1230 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1231 break;
1232
1233 case 2: /*DHUl*/
1234 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1235 break;
1236
1237 case 3: /*DIUl*/
1238 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1239 break;
1240
1241 case 4: /*DIWBc*/
1242 HAS_OPTION(XTENSA_OPTION_DCACHE);
1243 break;
1244
1245 case 5: /*DIWBIc*/
1246 HAS_OPTION(XTENSA_OPTION_DCACHE);
1247 break;
1248
1249 default: /*reserved*/
1250 RESERVED();
1251 break;
1252
1253 }
1254 break;
1255
1256 case 12: /*IPFc*/
1257 HAS_OPTION(XTENSA_OPTION_ICACHE);
1258 break;
1259
1260 case 13: /*ICEc*/
1261 switch (OP1) {
1262 case 0: /*IPFLl*/
1263 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1264 break;
1265
1266 case 2: /*IHUl*/
1267 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1268 break;
1269
1270 case 3: /*IIUl*/
1271 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1272 break;
1273
1274 default: /*reserved*/
1275 RESERVED();
1276 break;
1277 }
1278 break;
1279
1280 case 14: /*IHIc*/
1281 HAS_OPTION(XTENSA_OPTION_ICACHE);
1282 break;
1283
1284 case 15: /*IIIc*/
1285 HAS_OPTION(XTENSA_OPTION_ICACHE);
1286 break;
1287
1288 default: /*reserved*/
1289 RESERVED();
1290 break;
1291 }
1292 break;
1293
1294 case 9: /*L16SI*/
1295 gen_load_store(ld16s, 1);
1296 break;
1297
1298 case 10: /*MOVI*/
1299 tcg_gen_movi_i32(cpu_R[RRI8_T],
1300 RRI8_IMM8 | (RRI8_S << 8) |
1301 ((RRI8_S & 0x8) ? 0xfffff000 : 0));
1302 break;
1303
1304 case 11: /*L32AIy*/
1305 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
1306 gen_load_store(ld32u, 2); /*TODO acquire?*/
1307 break;
1308
1309 case 12: /*ADDI*/
1310 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE);
1311 break;
1312
1313 case 13: /*ADDMI*/
1314 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8);
1315 break;
1316
1317 case 14: /*S32C1Iy*/
1318 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
1319 {
1320 int label = gen_new_label();
1321 TCGv_i32 tmp = tcg_temp_local_new_i32();
1322 TCGv_i32 addr = tcg_temp_local_new_i32();
1323
1324 tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
1325 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
1326 tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring);
1327 tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
1328 cpu_SR[SCOMPARE1], label);
1329
1330 tcg_gen_qemu_st32(tmp, addr, dc->cring);
1331
1332 gen_set_label(label);
1333 tcg_temp_free(addr);
1334 tcg_temp_free(tmp);
1335 }
1336 break;
1337
1338 case 15: /*S32RIy*/
1339 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
1340 gen_load_store(st32, 2); /*TODO release?*/
1341 break;
1342
1343 default: /*reserved*/
1344 RESERVED();
1345 break;
1346 }
1347 break;
1348 #undef gen_load_store
1349
1350 case 3: /*LSCIp*/
1351 HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
1352 TBD();
1353 break;
1354
1355 case 4: /*MAC16d*/
1356 HAS_OPTION(XTENSA_OPTION_MAC16);
1357 TBD();
1358 break;
1359
1360 case 5: /*CALLN*/
1361 switch (CALL_N) {
1362 case 0: /*CALL0*/
1363 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
1364 gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
1365 break;
1366
1367 case 1: /*CALL4w*/
1368 case 2: /*CALL8w*/
1369 case 3: /*CALL12w*/
1370 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1371 TBD();
1372 break;
1373 }
1374 break;
1375
1376 case 6: /*SI*/
1377 switch (CALL_N) {
1378 case 0: /*J*/
1379 gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0);
1380 break;
1381
1382 case 1: /*BZ*/
1383 {
1384 static const TCGCond cond[] = {
1385 TCG_COND_EQ, /*BEQZ*/
1386 TCG_COND_NE, /*BNEZ*/
1387 TCG_COND_LT, /*BLTZ*/
1388 TCG_COND_GE, /*BGEZ*/
1389 };
1390
1391 gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0,
1392 4 + BRI12_IMM12_SE);
1393 }
1394 break;
1395
1396 case 2: /*BI0*/
1397 {
1398 static const TCGCond cond[] = {
1399 TCG_COND_EQ, /*BEQI*/
1400 TCG_COND_NE, /*BNEI*/
1401 TCG_COND_LT, /*BLTI*/
1402 TCG_COND_GE, /*BGEI*/
1403 };
1404
1405 gen_brcondi(dc, cond[BRI8_M & 3],
1406 cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
1407 }
1408 break;
1409
1410 case 3: /*BI1*/
1411 switch (BRI8_M) {
1412 case 0: /*ENTRYw*/
1413 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1414 TBD();
1415 break;
1416
1417 case 1: /*B1*/
1418 switch (BRI8_R) {
1419 case 0: /*BFp*/
1420 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1421 TBD();
1422 break;
1423
1424 case 1: /*BTp*/
1425 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1426 TBD();
1427 break;
1428
1429 case 8: /*LOOP*/
1430 TBD();
1431 break;
1432
1433 case 9: /*LOOPNEZ*/
1434 TBD();
1435 break;
1436
1437 case 10: /*LOOPGTZ*/
1438 TBD();
1439 break;
1440
1441 default: /*reserved*/
1442 RESERVED();
1443 break;
1444
1445 }
1446 break;
1447
1448 case 2: /*BLTUI*/
1449 case 3: /*BGEUI*/
1450 gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
1451 cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE);
1452 break;
1453 }
1454 break;
1455
1456 }
1457 break;
1458
1459 case 7: /*B*/
1460 {
1461 TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
1462
1463 switch (RRI8_R & 7) {
1464 case 0: /*BNONE*/ /*BANY*/
1465 {
1466 TCGv_i32 tmp = tcg_temp_new_i32();
1467 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
1468 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
1469 tcg_temp_free(tmp);
1470 }
1471 break;
1472
1473 case 1: /*BEQ*/ /*BNE*/
1474 case 2: /*BLT*/ /*BGE*/
1475 case 3: /*BLTU*/ /*BGEU*/
1476 {
1477 static const TCGCond cond[] = {
1478 [1] = TCG_COND_EQ,
1479 [2] = TCG_COND_LT,
1480 [3] = TCG_COND_LTU,
1481 [9] = TCG_COND_NE,
1482 [10] = TCG_COND_GE,
1483 [11] = TCG_COND_GEU,
1484 };
1485 gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T],
1486 4 + RRI8_IMM8_SE);
1487 }
1488 break;
1489
1490 case 4: /*BALL*/ /*BNALL*/
1491 {
1492 TCGv_i32 tmp = tcg_temp_new_i32();
1493 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
1494 gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T],
1495 4 + RRI8_IMM8_SE);
1496 tcg_temp_free(tmp);
1497 }
1498 break;
1499
1500 case 5: /*BBC*/ /*BBS*/
1501 {
1502 TCGv_i32 bit = tcg_const_i32(1);
1503 TCGv_i32 tmp = tcg_temp_new_i32();
1504 tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
1505 tcg_gen_shl_i32(bit, bit, tmp);
1506 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
1507 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
1508 tcg_temp_free(tmp);
1509 tcg_temp_free(bit);
1510 }
1511 break;
1512
1513 case 6: /*BBCI*/ /*BBSI*/
1514 case 7:
1515 {
1516 TCGv_i32 tmp = tcg_temp_new_i32();
1517 tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
1518 1 << (((RRI8_R & 1) << 4) | RRI8_T));
1519 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
1520 tcg_temp_free(tmp);
1521 }
1522 break;
1523
1524 }
1525 }
1526 break;
1527
1528 #define gen_narrow_load_store(type) do { \
1529 TCGv_i32 addr = tcg_temp_new_i32(); \
1530 tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
1531 tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \
1532 tcg_temp_free(addr); \
1533 } while (0)
1534
1535 case 8: /*L32I.Nn*/
1536 gen_narrow_load_store(ld32u);
1537 break;
1538
1539 case 9: /*S32I.Nn*/
1540 gen_narrow_load_store(st32);
1541 break;
1542 #undef gen_narrow_load_store
1543
1544 case 10: /*ADD.Nn*/
1545 tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]);
1546 break;
1547
1548 case 11: /*ADDI.Nn*/
1549 tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1);
1550 break;
1551
1552 case 12: /*ST2n*/
1553 if (RRRN_T < 8) { /*MOVI.Nn*/
1554 tcg_gen_movi_i32(cpu_R[RRRN_S],
1555 RRRN_R | (RRRN_T << 4) |
1556 ((RRRN_T & 6) == 6 ? 0xffffff80 : 0));
1557 } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/
1558 TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
1559
1560 gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
1561 4 + (RRRN_R | ((RRRN_T & 3) << 4)));
1562 }
1563 break;
1564
1565 case 13: /*ST3n*/
1566 switch (RRRN_R) {
1567 case 0: /*MOV.Nn*/
1568 tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]);
1569 break;
1570
1571 case 15: /*S3*/
1572 switch (RRRN_T) {
1573 case 0: /*RET.Nn*/
1574 gen_jump(dc, cpu_R[0]);
1575 break;
1576
1577 case 1: /*RETW.Nn*/
1578 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1579 TBD();
1580 break;
1581
1582 case 2: /*BREAK.Nn*/
1583 TBD();
1584 break;
1585
1586 case 3: /*NOP.Nn*/
1587 break;
1588
1589 case 6: /*ILL.Nn*/
1590 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1591 break;
1592
1593 default: /*reserved*/
1594 RESERVED();
1595 break;
1596 }
1597 break;
1598
1599 default: /*reserved*/
1600 RESERVED();
1601 break;
1602 }
1603 break;
1604
1605 default: /*reserved*/
1606 RESERVED();
1607 break;
1608 }
1609
1610 dc->pc = dc->next_pc;
1611 return;
1612
1613 invalid_opcode:
1614 qemu_log("INVALID(pc = %08x)\n", dc->pc);
1615 dc->pc = dc->next_pc;
1616 #undef HAS_OPTION
1617 }
1618
1619 static void check_breakpoint(CPUState *env, DisasContext *dc)
1620 {
1621 CPUBreakpoint *bp;
1622
1623 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1624 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1625 if (bp->pc == dc->pc) {
1626 tcg_gen_movi_i32(cpu_pc, dc->pc);
1627 gen_exception(EXCP_DEBUG);
1628 dc->is_jmp = DISAS_UPDATE;
1629 }
1630 }
1631 }
1632 }
1633
/*
 * Translate guest code starting at tb->pc into a TCG opcode stream.
 *
 * When search_pc is non-zero, per-opcode metadata (gen_opc_pc,
 * gen_opc_instr_start, gen_opc_icount) is also recorded so that a host
 * PC inside the generated code can later be mapped back to a guest PC.
 *
 * Translation stops at the first of: an instruction that ends the
 * block (dc.is_jmp != DISAS_NEXT), the max_insns budget, a guest page
 * boundary, or exhaustion of the opcode buffer.
 */
static void gen_intermediate_code_internal(
        CPUState *env, TranslationBlock *tb, int search_pc)
{
    DisasContext dc;
    int insn_count = 0;
    int j, lj = -1;     /* lj: index of the last filled gen_opc_* slot */
    uint16_t *gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    int max_insns = tb->cflags & CF_COUNT_MASK;
    uint32_t pc_start = tb->pc;
    uint32_t next_page_start =
        (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    dc.config = env->config;
    dc.singlestep_enabled = env->singlestep_enabled;
    dc.tb = tb;
    dc.pc = pc_start;
    dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
    /* while EXCM is set, memory accesses run at ring 0 */
    dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
    dc.is_jmp = DISAS_NEXT;

    init_sar_tracker(&dc);

    gen_icount_start();

    /* An exception was taken while single-stepping: re-raise the debug
     * exception immediately so the debugger regains control. */
    if (env->singlestep_enabled && env->exception_taken) {
        env->exception_taken = 0;
        tcg_gen_movi_i32(cpu_pc, dc.pc);
        gen_exception(EXCP_DEBUG);
    }

    do {
        check_breakpoint(env, &dc);

        if (search_pc) {
            /* Record guest PC and icount for the opcode slot about to
             * be filled; zero-fill any intervening slots that are not
             * instruction starts. */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    gen_opc_instr_start[lj++] = 0;
                }
            }
            gen_opc_pc[lj] = dc.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = insn_count;
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        disas_xtensa_insn(&dc);
        ++insn_count;
        if (env->singlestep_enabled) {
            /* Limit the block to one instruction when single-stepping */
            tcg_gen_movi_i32(cpu_pc, dc.pc);
            gen_exception(EXCP_DEBUG);
            break;
        }
    } while (dc.is_jmp == DISAS_NEXT &&
            insn_count < max_insns &&
            dc.pc < next_page_start &&
            gen_opc_ptr < gen_opc_end);

    reset_sar_tracker(&dc);

    if (dc.is_jmp == DISAS_NEXT) {
        /* Block ended for a non-control-flow reason: jump to next PC */
        gen_jumpi(&dc, dc.pc, 0);
    }
    gen_icount_end(tb, insn_count);
    *gen_opc_ptr = INDEX_op_end;

    if (!search_pc) {
        tb->size = dc.pc - pc_start;
        tb->icount = insn_count;
    }
}
1713
1714 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
1715 {
1716 gen_intermediate_code_internal(env, tb, 0);
1717 }
1718
1719 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
1720 {
1721 gen_intermediate_code_internal(env, tb, 1);
1722 }
1723
1724 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
1725 int flags)
1726 {
1727 int i, j;
1728
1729 cpu_fprintf(f, "PC=%08x\n\n", env->pc);
1730
1731 for (i = j = 0; i < 256; ++i) {
1732 if (sregnames[i]) {
1733 cpu_fprintf(f, "%s=%08x%c", sregnames[i], env->sregs[i],
1734 (j++ % 4) == 3 ? '\n' : ' ');
1735 }
1736 }
1737
1738 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
1739
1740 for (i = j = 0; i < 256; ++i) {
1741 if (uregnames[i]) {
1742 cpu_fprintf(f, "%s=%08x%c", uregnames[i], env->uregs[i],
1743 (j++ % 4) == 3 ? '\n' : ' ');
1744 }
1745 }
1746
1747 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
1748
1749 for (i = 0; i < 16; ++i) {
1750 cpu_fprintf(f, "A%02d=%08x%c", i, env->regs[i],
1751 (i % 4) == 3 ? '\n' : ' ');
1752 }
1753 }
1754
/* Restore the guest PC from the metadata recorded by the search_pc
 * translation pass, after an exception occurred mid-block. */
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}