git.proxmox.com Git - qemu.git/blame - target-xtensa/translate.c
target-xtensa: implement LSAI group
1/*
2 * Xtensa ISA:
3 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
4 *
5 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of the Open Source and Linux Lab nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <stdio.h>
32
33#include "cpu.h"
34#include "exec-all.h"
35#include "disas.h"
36#include "tcg-op.h"
37#include "qemu-log.h"
38
39#include "helpers.h"
40#define GEN_HELPER 1
41#include "helpers.h"
42
43typedef struct DisasContext {
44 const XtensaConfig *config;
45 TranslationBlock *tb;
46 uint32_t pc;
47 uint32_t next_pc;
48 int is_jmp;
49 int singlestep_enabled;
50
51 bool sar_5bit;
52 bool sar_m32_5bit;
53 bool sar_m32_allocated;
54 TCGv_i32 sar_m32;
55} DisasContext;
56
57static TCGv_ptr cpu_env;
58static TCGv_i32 cpu_pc;
59static TCGv_i32 cpu_R[16];
60static TCGv_i32 cpu_SR[256];
61static TCGv_i32 cpu_UR[256];
62
63#include "gen-icount.h"
 64
 65static const char * const sregnames[256] = {
 66 [SAR] = "SAR",
 67 [SCOMPARE1] = "SCOMPARE1",
68};
69
70static const char * const uregnames[256] = {
71 [THREADPTR] = "THREADPTR",
72 [FCR] = "FCR",
73 [FSR] = "FSR",
74};
75
76void xtensa_translate_init(void)
77{
78 static const char * const regnames[] = {
79 "ar0", "ar1", "ar2", "ar3",
80 "ar4", "ar5", "ar6", "ar7",
81 "ar8", "ar9", "ar10", "ar11",
82 "ar12", "ar13", "ar14", "ar15",
83 };
84 int i;
85
86 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
87 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
88 offsetof(CPUState, pc), "pc");
89
90 for (i = 0; i < 16; i++) {
91 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
92 offsetof(CPUState, regs[i]),
93 regnames[i]);
94 }
95
96 for (i = 0; i < 256; ++i) {
97 if (sregnames[i]) {
98 cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
99 offsetof(CPUState, sregs[i]),
100 sregnames[i]);
101 }
102 }
103
104 for (i = 0; i < 256; ++i) {
105 if (uregnames[i]) {
106 cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
107 offsetof(CPUState, uregs[i]),
108 uregnames[i]);
109 }
110 }
111#define GEN_HELPER 2
112#include "helpers.h"
113}
114
115static inline bool option_enabled(DisasContext *dc, int opt)
116{
117 return xtensa_option_enabled(dc->config, opt);
118}
119
120static void init_sar_tracker(DisasContext *dc)
121{
122 dc->sar_5bit = false;
123 dc->sar_m32_5bit = false;
124 dc->sar_m32_allocated = false;
125}
126
127static void reset_sar_tracker(DisasContext *dc)
128{
129 if (dc->sar_m32_allocated) {
130 tcg_temp_free(dc->sar_m32);
131 }
132}
133
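/*
 * SAR usage tracking: SSR/SSA8L/SSAI record a 5-bit right-shift amount in
 * SAR (sar_5bit), while SSL/SSA8B store 32 - amount in SAR and keep the
 * raw 5-bit amount in sar_m32 (sar_m32_5bit).  SRL/SLL/SRA use these flags
 * to emit plain 32-bit shifts instead of the generic 64-bit shift path.
 */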
134static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
135{
136 tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
137 if (dc->sar_m32_5bit) {
138 tcg_gen_discard_i32(dc->sar_m32);
139 }
140 dc->sar_5bit = true;
141 dc->sar_m32_5bit = false;
142}
143
144static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
145{
146 TCGv_i32 tmp = tcg_const_i32(32);
147 if (!dc->sar_m32_allocated) {
148 dc->sar_m32 = tcg_temp_local_new_i32();
149 dc->sar_m32_allocated = true;
150 }
151 tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
152 tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
153 dc->sar_5bit = false;
154 dc->sar_m32_5bit = true;
155 tcg_temp_free(tmp);
156}
157
158static void gen_exception(int excp)
159{
160 TCGv_i32 tmp = tcg_const_i32(excp);
161 gen_helper_exception(tmp);
162 tcg_temp_free(tmp);
163}
164
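/*
 * Jump to 'dest'.  A non-negative slot selects one of the two direct
 * TB-chaining slots (goto_tb + exit_tb(tb + slot)); slot == -1, or
 * single-stepping, forces an unchained exit so the main loop can resolve
 * the target (or raise EXCP_DEBUG).
 */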
165static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
166{
167 tcg_gen_mov_i32(cpu_pc, dest);
168 if (dc->singlestep_enabled) {
169 gen_exception(EXCP_DEBUG);
170 } else {
171 if (slot >= 0) {
172 tcg_gen_goto_tb(slot);
173 tcg_gen_exit_tb((tcg_target_long)dc->tb + slot);
174 } else {
175 tcg_gen_exit_tb(0);
176 }
177 }
178 dc->is_jmp = DISAS_UPDATE;
179}
180
181static void gen_jump(DisasContext *dc, TCGv dest)
182{
183 gen_jump_slot(dc, dest, -1);
184}
185
186static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
187{
188 TCGv_i32 tmp = tcg_const_i32(dest);
189 if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
190 slot = -1;
191 }
192 gen_jump_slot(dc, tmp, slot);
193 tcg_temp_free(tmp);
194}
195
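/*
 * Conditional branch: the not-taken path falls through to next_pc via
 * chaining slot 0, the taken path jumps to pc + offset via slot 1.
 */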
196static void gen_brcond(DisasContext *dc, TCGCond cond,
197 TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
198{
199 int label = gen_new_label();
200
201 tcg_gen_brcond_i32(cond, t0, t1, label);
202 gen_jumpi(dc, dc->next_pc, 0);
203 gen_set_label(label);
204 gen_jumpi(dc, dc->pc + offset, 1);
205}
206
207static void gen_brcondi(DisasContext *dc, TCGCond cond,
208 TCGv_i32 t0, uint32_t t1, uint32_t offset)
209{
210 TCGv_i32 tmp = tcg_const_i32(t1);
211 gen_brcond(dc, cond, t0, tmp, offset);
212 tcg_temp_free(tmp);
213}
214
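/*
 * RSR/WSR dispatch: special registers with an entry in the handler table
 * get custom code, any other named SR is a plain move to/from cpu_SR[],
 * and unknown SRs are only logged.
 */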
215static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
216{
217 static void (* const rsr_handler[256])(DisasContext *dc,
218 TCGv_i32 d, uint32_t sr) = {
219 };
220
221 if (sregnames[sr]) {
222 if (rsr_handler[sr]) {
223 rsr_handler[sr](dc, d, sr);
224 } else {
225 tcg_gen_mov_i32(d, cpu_SR[sr]);
226 }
227 } else {
228 qemu_log("RSR %d not implemented, ", sr);
229 }
230}
231
232static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
233{
234 tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
235 if (dc->sar_m32_5bit) {
236 tcg_gen_discard_i32(dc->sar_m32);
237 }
238 dc->sar_5bit = false;
239 dc->sar_m32_5bit = false;
240}
241
242static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
243{
244 static void (* const wsr_handler[256])(DisasContext *dc,
245 uint32_t sr, TCGv_i32 v) = {
 246 [SAR] = gen_wsr_sar,
247 };
248
249 if (sregnames[sr]) {
250 if (wsr_handler[sr]) {
251 wsr_handler[sr](dc, sr, s);
252 } else {
253 tcg_gen_mov_i32(cpu_SR[sr], s);
254 }
255 } else {
256 qemu_log("WSR %d not implemented, ", sr);
257 }
258}
259
260static void disas_xtensa_insn(DisasContext *dc)
261{
262#define HAS_OPTION(opt) do { \
263 if (!option_enabled(dc, opt)) { \
264 qemu_log("Option %d is not enabled %s:%d\n", \
265 (opt), __FILE__, __LINE__); \
266 goto invalid_opcode; \
267 } \
268 } while (0)
269
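/*
 * Operand field extraction: the macros below pull opcode and register
 * fields straight out of the three raw instruction bytes b0..b2, with
 * separate big- and little-endian layouts matching the Xtensa encodings.
 */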
270#ifdef TARGET_WORDS_BIGENDIAN
271#define OP0 (((b0) & 0xf0) >> 4)
272#define OP1 (((b2) & 0xf0) >> 4)
273#define OP2 ((b2) & 0xf)
274#define RRR_R ((b1) & 0xf)
275#define RRR_S (((b1) & 0xf0) >> 4)
276#define RRR_T ((b0) & 0xf)
277#else
278#define OP0 (((b0) & 0xf))
279#define OP1 (((b2) & 0xf))
280#define OP2 (((b2) & 0xf0) >> 4)
281#define RRR_R (((b1) & 0xf0) >> 4)
282#define RRR_S (((b1) & 0xf))
283#define RRR_T (((b0) & 0xf0) >> 4)
284#endif
285
286#define RRRN_R RRR_R
287#define RRRN_S RRR_S
288#define RRRN_T RRR_T
289
290#define RRI8_R RRR_R
291#define RRI8_S RRR_S
292#define RRI8_T RRR_T
293#define RRI8_IMM8 (b2)
294#define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8)
295
296#ifdef TARGET_WORDS_BIGENDIAN
297#define RI16_IMM16 (((b1) << 8) | (b2))
298#else
299#define RI16_IMM16 (((b2) << 8) | (b1))
300#endif
301
302#ifdef TARGET_WORDS_BIGENDIAN
303#define CALL_N (((b0) & 0xc) >> 2)
304#define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2))
305#else
306#define CALL_N (((b0) & 0x30) >> 4)
307#define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10))
308#endif
309#define CALL_OFFSET_SE \
310 (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET)
311
312#define CALLX_N CALL_N
313#ifdef TARGET_WORDS_BIGENDIAN
314#define CALLX_M ((b0) & 0x3)
315#else
316#define CALLX_M (((b0) & 0xc0) >> 6)
317#endif
318#define CALLX_S RRR_S
319
320#define BRI12_M CALLX_M
321#define BRI12_S RRR_S
322#ifdef TARGET_WORDS_BIGENDIAN
323#define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2))
324#else
325#define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4))
326#endif
327#define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12)
328
329#define BRI8_M BRI12_M
330#define BRI8_R RRI8_R
331#define BRI8_S RRI8_S
332#define BRI8_IMM8 RRI8_IMM8
333#define BRI8_IMM8_SE RRI8_IMM8_SE
334
335#define RSR_SR (b1)
336
337 uint8_t b0 = ldub_code(dc->pc);
338 uint8_t b1 = ldub_code(dc->pc + 1);
339 uint8_t b2 = ldub_code(dc->pc + 2);
340
341 static const uint32_t B4CONST[] = {
342 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
343 };
344
345 static const uint32_t B4CONSTU[] = {
346 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
347 };
348
349 if (OP0 >= 8) {
350 dc->next_pc = dc->pc + 2;
351 HAS_OPTION(XTENSA_OPTION_CODE_DENSITY);
352 } else {
353 dc->next_pc = dc->pc + 3;
354 }
355
356 switch (OP0) {
357 case 0: /*QRST*/
358 switch (OP1) {
359 case 0: /*RST0*/
360 switch (OP2) {
361 case 0: /*ST0*/
362 if ((RRR_R & 0xc) == 0x8) {
363 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
364 }
365
366 switch (RRR_R) {
367 case 0: /*SNM0*/
368 switch (CALLX_M) {
369 case 0: /*ILL*/
370 break;
371
372 case 1: /*reserved*/
373 break;
374
375 case 2: /*JR*/
376 switch (CALLX_N) {
377 case 0: /*RET*/
378 case 2: /*JX*/
379 gen_jump(dc, cpu_R[CALLX_S]);
380 break;
381
382 case 1: /*RETWw*/
383 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
384 break;
385
386 case 3: /*reserved*/
387 break;
388 }
389 break;
390
391 case 3: /*CALLX*/
392 switch (CALLX_N) {
393 case 0: /*CALLX0*/
394 {
395 TCGv_i32 tmp = tcg_temp_new_i32();
396 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
397 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
398 gen_jump(dc, tmp);
399 tcg_temp_free(tmp);
400 }
401 break;
402
403 case 1: /*CALLX4w*/
404 case 2: /*CALLX8w*/
405 case 3: /*CALLX12w*/
406 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
407 break;
408 }
409 break;
410 }
411 break;
412
413 case 1: /*MOVSPw*/
414 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
415 break;
416
417 case 2: /*SYNC*/
418 break;
419
420 case 3:
421 break;
422
423 }
424 break;
425
426 case 1: /*AND*/
427 tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
428 break;
429
430 case 2: /*OR*/
431 tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
432 break;
433
434 case 3: /*XOR*/
435 tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
436 break;
437
438 case 4: /*ST1*/
439 switch (RRR_R) {
440 case 0: /*SSR*/
441 gen_right_shift_sar(dc, cpu_R[RRR_S]);
442 break;
443
444 case 1: /*SSL*/
445 gen_left_shift_sar(dc, cpu_R[RRR_S]);
446 break;
447
448 case 2: /*SSA8L*/
449 {
450 TCGv_i32 tmp = tcg_temp_new_i32();
451 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
452 gen_right_shift_sar(dc, tmp);
453 tcg_temp_free(tmp);
454 }
455 break;
456
457 case 3: /*SSA8B*/
458 {
459 TCGv_i32 tmp = tcg_temp_new_i32();
460 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
461 gen_left_shift_sar(dc, tmp);
462 tcg_temp_free(tmp);
463 }
464 break;
465
466 case 4: /*SSAI*/
467 {
468 TCGv_i32 tmp = tcg_const_i32(
469 RRR_S | ((RRR_T & 1) << 4));
470 gen_right_shift_sar(dc, tmp);
471 tcg_temp_free(tmp);
472 }
473 break;
474
475 case 6: /*RER*/
476 break;
477
478 case 7: /*WER*/
479 break;
480
481 case 8: /*ROTWw*/
482 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
483 break;
484
485 case 14: /*NSAu*/
486 HAS_OPTION(XTENSA_OPTION_MISC_OP);
487 gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
488 break;
489
490 case 15: /*NSAUu*/
491 HAS_OPTION(XTENSA_OPTION_MISC_OP);
492 gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
493 break;
494
495 default: /*reserved*/
496 break;
497 }
498 break;
499
500 case 5: /*TLB*/
501 break;
502
503 case 6: /*RT0*/
504 switch (RRR_S) {
505 case 0: /*NEG*/
506 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
507 break;
508
509 case 1: /*ABS*/
510 {
511 int label = gen_new_label();
512 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
513 tcg_gen_brcondi_i32(
514 TCG_COND_GE, cpu_R[RRR_R], 0, label);
515 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
516 gen_set_label(label);
517 }
518 break;
519
520 default: /*reserved*/
521 break;
522 }
523 break;
524
525 case 7: /*reserved*/
526 break;
527
528 case 8: /*ADD*/
529 tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
530 break;
531
532 case 9: /*ADD**/
533 case 10:
534 case 11:
535 {
536 TCGv_i32 tmp = tcg_temp_new_i32();
537 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
538 tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
539 tcg_temp_free(tmp);
540 }
541 break;
542
543 case 12: /*SUB*/
544 tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
545 break;
546
547 case 13: /*SUB**/
548 case 14:
549 case 15:
550 {
551 TCGv_i32 tmp = tcg_temp_new_i32();
552 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
553 tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
554 tcg_temp_free(tmp);
555 }
556 break;
557 }
558 break;
559
560 case 1: /*RST1*/
561 switch (OP2) {
562 case 0: /*SLLI*/
563 case 1:
564 tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S],
565 32 - (RRR_T | ((OP2 & 1) << 4)));
566 break;
567
568 case 2: /*SRAI*/
569 case 3:
570 tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T],
571 RRR_S | ((OP2 & 1) << 4));
572 break;
573
574 case 4: /*SRLI*/
575 tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S);
576 break;
577
578 case 6: /*XSR*/
579 {
580 TCGv_i32 tmp = tcg_temp_new_i32();
581 tcg_gen_mov_i32(tmp, cpu_R[RRR_T]);
582 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
583 gen_wsr(dc, RSR_SR, tmp);
584 tcg_temp_free(tmp);
585 }
586 break;
587
588 /*
589 * Note: 64 bit ops are used here solely because SAR values
590 * have range 0..63
591 */
592#define gen_shift_reg(cmd, reg) do { \
593 TCGv_i64 tmp = tcg_temp_new_i64(); \
594 tcg_gen_extu_i32_i64(tmp, reg); \
595 tcg_gen_##cmd##_i64(v, v, tmp); \
596 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
597 tcg_temp_free_i64(v); \
598 tcg_temp_free_i64(tmp); \
599 } while (0)
600
601#define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
602
603 case 8: /*SRC*/
604 {
605 TCGv_i64 v = tcg_temp_new_i64();
606 tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]);
607 gen_shift(shr);
608 }
609 break;
610
611 case 9: /*SRL*/
612 if (dc->sar_5bit) {
613 tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
614 } else {
615 TCGv_i64 v = tcg_temp_new_i64();
616 tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]);
617 gen_shift(shr);
618 }
619 break;
620
621 case 10: /*SLL*/
622 if (dc->sar_m32_5bit) {
623 tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32);
624 } else {
625 TCGv_i64 v = tcg_temp_new_i64();
626 TCGv_i32 s = tcg_const_i32(32);
627 tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
628 tcg_gen_andi_i32(s, s, 0x3f);
629 tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]);
630 gen_shift_reg(shl, s);
631 tcg_temp_free(s);
632 }
633 break;
634
635 case 11: /*SRA*/
636 if (dc->sar_5bit) {
637 tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
638 } else {
639 TCGv_i64 v = tcg_temp_new_i64();
640 tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]);
641 gen_shift(sar);
642 }
643 break;
644#undef gen_shift
645#undef gen_shift_reg
646
647 case 12: /*MUL16U*/
648 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
649 {
650 TCGv_i32 v1 = tcg_temp_new_i32();
651 TCGv_i32 v2 = tcg_temp_new_i32();
652 tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]);
653 tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]);
654 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
655 tcg_temp_free(v2);
656 tcg_temp_free(v1);
657 }
658 break;
659
660 case 13: /*MUL16S*/
661 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
662 {
663 TCGv_i32 v1 = tcg_temp_new_i32();
664 TCGv_i32 v2 = tcg_temp_new_i32();
665 tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]);
666 tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]);
667 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
668 tcg_temp_free(v2);
669 tcg_temp_free(v1);
670 }
671 break;
672
673 default: /*reserved*/
674 break;
675 }
676 break;
677
678 case 2: /*RST2*/
679 break;
680
681 case 3: /*RST3*/
682 switch (OP2) {
683 case 0: /*RSR*/
684 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
685 break;
686
687 case 1: /*WSR*/
688 gen_wsr(dc, RSR_SR, cpu_R[RRR_T]);
689 break;
690
691 case 2: /*SEXTu*/
692 HAS_OPTION(XTENSA_OPTION_MISC_OP);
693 {
694 int shift = 24 - RRR_T;
695
696 if (shift == 24) {
697 tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
698 } else if (shift == 16) {
699 tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
700 } else {
701 TCGv_i32 tmp = tcg_temp_new_i32();
702 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift);
703 tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift);
704 tcg_temp_free(tmp);
705 }
706 }
707 break;
708
709 case 3: /*CLAMPSu*/
710 HAS_OPTION(XTENSA_OPTION_MISC_OP);
711 {
712 TCGv_i32 tmp1 = tcg_temp_new_i32();
713 TCGv_i32 tmp2 = tcg_temp_new_i32();
714 int label = gen_new_label();
715
716 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
717 tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]);
718 tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7));
719 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
720 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp2, 0, label);
721
722 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
723 tcg_gen_xori_i32(cpu_R[RRR_R], tmp1,
724 0xffffffff >> (25 - RRR_T));
725
726 gen_set_label(label);
727
728 tcg_temp_free(tmp1);
729 tcg_temp_free(tmp2);
730 }
731 break;
732
733 case 4: /*MINu*/
734 case 5: /*MAXu*/
735 case 6: /*MINUu*/
736 case 7: /*MAXUu*/
737 HAS_OPTION(XTENSA_OPTION_MISC_OP);
738 {
739 static const TCGCond cond[] = {
740 TCG_COND_LE,
741 TCG_COND_GE,
742 TCG_COND_LEU,
743 TCG_COND_GEU
744 };
745 int label = gen_new_label();
746
747 if (RRR_R != RRR_T) {
748 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
749 tcg_gen_brcond_i32(cond[OP2 - 4],
750 cpu_R[RRR_S], cpu_R[RRR_T], label);
751 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
752 } else {
753 tcg_gen_brcond_i32(cond[OP2 - 4],
754 cpu_R[RRR_T], cpu_R[RRR_S], label);
755 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
756 }
757 gen_set_label(label);
758 }
759 break;
760
761 case 8: /*MOVEQZ*/
762 case 9: /*MOVNEZ*/
763 case 10: /*MOVLTZ*/
764 case 11: /*MOVGEZ*/
765 {
766 static const TCGCond cond[] = {
767 TCG_COND_NE,
768 TCG_COND_EQ,
769 TCG_COND_GE,
770 TCG_COND_LT
771 };
772 int label = gen_new_label();
773 tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label);
774 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
775 gen_set_label(label);
776 }
777 break;
778
779 case 12: /*MOVFp*/
780 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
781 break;
782
783 case 13: /*MOVTp*/
784 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
785 break;
786
787 case 14: /*RUR*/
788 {
789 int st = (RRR_S << 4) + RRR_T;
790 if (uregnames[st]) {
791 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
792 } else {
793 qemu_log("RUR %d not implemented, ", st);
794 }
795 }
796 break;
797
798 case 15: /*WUR*/
799 {
800 if (uregnames[RSR_SR]) {
801 tcg_gen_mov_i32(cpu_UR[RSR_SR], cpu_R[RRR_T]);
802 } else {
803 qemu_log("WUR %d not implemented, ", RSR_SR);
804 }
805 }
806 break;
807
808 }
809 break;
810
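/*
 * EXTUI: extract an unsigned bit field, i.e. shift right by {op1, s}
 * bits and mask to the low (op2 + 1) bits.
 */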
811 case 4: /*EXTUI*/
812 case 5:
813 {
814 int shiftimm = RRR_S | (OP1 << 4);
815 int maskimm = (1 << (OP2 + 1)) - 1;
816
817 TCGv_i32 tmp = tcg_temp_new_i32();
818 tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm);
819 tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm);
820 tcg_temp_free(tmp);
821 }
822 break;
823
824 case 6: /*CUST0*/
825 break;
826
827 case 7: /*CUST1*/
828 break;
829
830 case 8: /*LSCXp*/
831 HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
832 break;
833
834 case 9: /*LSC4*/
835 break;
836
837 case 10: /*FP0*/
838 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
839 break;
840
841 case 11: /*FP1*/
842 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
843 break;
844
845 default: /*reserved*/
846 break;
847 }
848 break;
849
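/*
 * L32R: PC-relative literal load.  The address is ((pc + 3) & ~3) plus an
 * always-negative offset built by one-extending the 16-bit immediate and
 * scaling it by 4 (range -262144..-4).
 */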
850 case 1: /*L32R*/
851 {
852 TCGv_i32 tmp = tcg_const_i32(
853 (0xfffc0000 | (RI16_IMM16 << 2)) +
854 ((dc->pc + 3) & ~3));
855
 856 /* no extended L32R (LITBASE-relative literals) */
857
858 tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, 0);
859 tcg_temp_free(tmp);
860 }
861 break;
862
863 case 2: /*LSAI*/
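/*
 * LSAI loads and stores: the effective address is AR[s] plus the
 * zero-extended 8-bit immediate scaled by the access size
 * (e.g. L32I with imm8 = 4 accesses AR[s] + 16).
 */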
864#define gen_load_store(type, shift) do { \
865 TCGv_i32 addr = tcg_temp_new_i32(); \
866 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
867 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, 0); \
868 tcg_temp_free(addr); \
869 } while (0)
870
871 switch (RRI8_R) {
872 case 0: /*L8UI*/
873 gen_load_store(ld8u, 0);
874 break;
875
876 case 1: /*L16UI*/
877 gen_load_store(ld16u, 1);
878 break;
879
880 case 2: /*L32I*/
881 gen_load_store(ld32u, 2);
882 break;
883
884 case 4: /*S8I*/
885 gen_load_store(st8, 0);
886 break;
887
888 case 5: /*S16I*/
889 gen_load_store(st16, 1);
890 break;
891
892 case 6: /*S32I*/
893 gen_load_store(st32, 2);
894 break;
895
896 case 7: /*CACHEc*/
897 break;
898
899 case 9: /*L16SI*/
900 gen_load_store(ld16s, 1);
901 break;
902
903 case 10: /*MOVI*/
904 tcg_gen_movi_i32(cpu_R[RRI8_T],
905 RRI8_IMM8 | (RRI8_S << 8) |
906 ((RRI8_S & 0x8) ? 0xfffff000 : 0));
907 break;
908
909 case 11: /*L32AIy*/
910 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
911 gen_load_store(ld32u, 2); /*TODO acquire?*/
912 break;
913
914 case 12: /*ADDI*/
915 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE);
916 break;
917
918 case 13: /*ADDMI*/
919 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8);
920 break;
921
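/*
 * S32C1I: conditional store.  The word at AR[s] + (imm8 << 2) is always
 * loaded into AR[t]; the original AR[t] value is stored back only if the
 * loaded word equals SCOMPARE1.  The generated sequence is a plain
 * load/compare/store, with no atomic memory operation.
 */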
922 case 14: /*S32C1Iy*/
923 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
924 {
925 int label = gen_new_label();
926 TCGv_i32 tmp = tcg_temp_local_new_i32();
927 TCGv_i32 addr = tcg_temp_local_new_i32();
928
929 tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
930 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
931 tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, 0);
932 tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
933 cpu_SR[SCOMPARE1], label);
934
935 tcg_gen_qemu_st32(tmp, addr, 0);
936
937 gen_set_label(label);
938 tcg_temp_free(addr);
939 tcg_temp_free(tmp);
940 }
941 break;
942
943 case 15: /*S32RIy*/
944 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
945 gen_load_store(st32, 2); /*TODO release?*/
946 break;
947
948 default: /*reserved*/
949 break;
950 }
 951 break;
 952#undef gen_load_store
953
954 case 3: /*LSCIp*/
955 HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
956 break;
957
958 case 4: /*MAC16d*/
959 HAS_OPTION(XTENSA_OPTION_MAC16);
960 break;
961
962 case 5: /*CALLN*/
963 switch (CALL_N) {
964 case 0: /*CALL0*/
965 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
966 gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
967 break;
968
969 case 1: /*CALL4w*/
970 case 2: /*CALL8w*/
971 case 3: /*CALL12w*/
972 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
973 break;
974 }
975 break;
976
977 case 6: /*SI*/
978 switch (CALL_N) {
979 case 0: /*J*/
980 gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0);
981 break;
982
983 case 1: /*BZ*/
984 {
985 static const TCGCond cond[] = {
986 TCG_COND_EQ, /*BEQZ*/
987 TCG_COND_NE, /*BNEZ*/
988 TCG_COND_LT, /*BLTZ*/
989 TCG_COND_GE, /*BGEZ*/
990 };
991
992 gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0,
993 4 + BRI12_IMM12_SE);
994 }
995 break;
996
997 case 2: /*BI0*/
998 {
999 static const TCGCond cond[] = {
1000 TCG_COND_EQ, /*BEQI*/
1001 TCG_COND_NE, /*BNEI*/
1002 TCG_COND_LT, /*BLTI*/
1003 TCG_COND_GE, /*BGEI*/
1004 };
1005
1006 gen_brcondi(dc, cond[BRI8_M & 3],
1007 cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
1008 }
1009 break;
1010
1011 case 3: /*BI1*/
1012 switch (BRI8_M) {
1013 case 0: /*ENTRYw*/
1014 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1015 break;
1016
1017 case 1: /*B1*/
1018 switch (BRI8_R) {
1019 case 0: /*BFp*/
1020 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1021 break;
1022
1023 case 1: /*BTp*/
1024 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1025 break;
1026
1027 case 8: /*LOOP*/
1028 break;
1029
1030 case 9: /*LOOPNEZ*/
1031 break;
1032
1033 case 10: /*LOOPGTZ*/
1034 break;
1035
1036 default: /*reserved*/
1037 break;
1038
1039 }
1040 break;
1041
1042 case 2: /*BLTUI*/
1043 case 3: /*BGEUI*/
1044 gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
1045 cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE);
1046 break;
1047 }
1048 break;
1049
1050 }
1051 break;
1052
1053 case 7: /*B*/
1054 {
1055 TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
1056
1057 switch (RRI8_R & 7) {
1058 case 0: /*BNONE*/ /*BANY*/
1059 {
1060 TCGv_i32 tmp = tcg_temp_new_i32();
1061 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
1062 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
1063 tcg_temp_free(tmp);
1064 }
1065 break;
1066
1067 case 1: /*BEQ*/ /*BNE*/
1068 case 2: /*BLT*/ /*BGE*/
1069 case 3: /*BLTU*/ /*BGEU*/
1070 {
1071 static const TCGCond cond[] = {
1072 [1] = TCG_COND_EQ,
1073 [2] = TCG_COND_LT,
1074 [3] = TCG_COND_LTU,
1075 [9] = TCG_COND_NE,
1076 [10] = TCG_COND_GE,
1077 [11] = TCG_COND_GEU,
1078 };
1079 gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T],
1080 4 + RRI8_IMM8_SE);
1081 }
1082 break;
1083
1084 case 4: /*BALL*/ /*BNALL*/
1085 {
1086 TCGv_i32 tmp = tcg_temp_new_i32();
1087 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
1088 gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T],
1089 4 + RRI8_IMM8_SE);
1090 tcg_temp_free(tmp);
1091 }
1092 break;
1093
1094 case 5: /*BBC*/ /*BBS*/
1095 {
1096 TCGv_i32 bit = tcg_const_i32(1);
1097 TCGv_i32 tmp = tcg_temp_new_i32();
1098 tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
1099 tcg_gen_shl_i32(bit, bit, tmp);
1100 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
1101 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
1102 tcg_temp_free(tmp);
1103 tcg_temp_free(bit);
1104 }
1105 break;
1106
1107 case 6: /*BBCI*/ /*BBSI*/
1108 case 7:
1109 {
1110 TCGv_i32 tmp = tcg_temp_new_i32();
1111 tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
1112 1 << (((RRI8_R & 1) << 4) | RRI8_T));
1113 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
1114 tcg_temp_free(tmp);
1115 }
1116 break;
1117
1118 }
1119 }
1120 break;
1121
1122#define gen_narrow_load_store(type) do { \
1123 TCGv_i32 addr = tcg_temp_new_i32(); \
1124 tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
1125 tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, 0); \
1126 tcg_temp_free(addr); \
1127 } while (0)
1128
 1129 case 8: /*L32I.Nn*/
 1130 gen_narrow_load_store(ld32u);
1131 break;
1132
1133 case 9: /*S32I.Nn*/
 1134 gen_narrow_load_store(st32);
 1135 break;
 1136#undef gen_narrow_load_store
1137
1138 case 10: /*ADD.Nn*/
 1139 tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]);
1140 break;
1141
1142 case 11: /*ADDI.Nn*/
 1143 tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1);
1144 break;
1145
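/*
 * MOVI.N: the 7-bit immediate {t, r} covers -32..95; values with the two
 * top bits set are sign-extended (e.g. t = 7, r = 15 encodes -1).
 */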
1146 case 12: /*ST2n*/
1147 if (RRRN_T < 8) { /*MOVI.Nn*/
1148 tcg_gen_movi_i32(cpu_R[RRRN_S],
1149 RRRN_R | (RRRN_T << 4) |
1150 ((RRRN_T & 6) == 6 ? 0xffffff80 : 0));
1151 } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/
1152 TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
1153
1154 gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
1155 4 + (RRRN_R | ((RRRN_T & 3) << 4)));
 1156 }
1157 break;
1158
1159 case 13: /*ST3n*/
1160 switch (RRRN_R) {
1161 case 0: /*MOV.Nn*/
1162 tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]);
1163 break;
1164
1165 case 15: /*S3*/
1166 switch (RRRN_T) {
1167 case 0: /*RET.Nn*/
1168 gen_jump(dc, cpu_R[0]);
1169 break;
1170
1171 case 1: /*RETW.Nn*/
1172 break;
1173
1174 case 2: /*BREAK.Nn*/
1175 break;
1176
1177 case 3: /*NOP.Nn*/
1178 break;
1179
1180 case 6: /*ILL.Nn*/
1181 break;
1182
1183 default: /*reserved*/
1184 break;
1185 }
1186 break;
1187
1188 default: /*reserved*/
1189 break;
1190 }
1191 break;
1192
1193 default: /*reserved*/
1194 break;
1195 }
1196
1197 dc->pc = dc->next_pc;
1198 return;
1199
1200invalid_opcode:
1201 qemu_log("INVALID(pc = %08x)\n", dc->pc);
1202 dc->pc = dc->next_pc;
1203#undef HAS_OPTION
1204}
1205
1206static void check_breakpoint(CPUState *env, DisasContext *dc)
1207{
1208 CPUBreakpoint *bp;
1209
1210 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1211 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1212 if (bp->pc == dc->pc) {
1213 tcg_gen_movi_i32(cpu_pc, dc->pc);
1214 gen_exception(EXCP_DEBUG);
1215 dc->is_jmp = DISAS_UPDATE;
1216 }
1217 }
1218 }
1219}
1220
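/*
 * Main translation loop: instructions are translated one by one until a
 * jump is emitted, the instruction or opcode-buffer budget is exhausted,
 * or the next instruction would start on the next page.  With search_pc
 * set, per-opcode pc/icount records are also filled in for
 * restore_state_to_opc().
 */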
1221static void gen_intermediate_code_internal(
1222 CPUState *env, TranslationBlock *tb, int search_pc)
1223{
1224 DisasContext dc;
1225 int insn_count = 0;
1226 int j, lj = -1;
1227 uint16_t *gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1228 int max_insns = tb->cflags & CF_COUNT_MASK;
1229 uint32_t pc_start = tb->pc;
1230 uint32_t next_page_start =
1231 (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1232
1233 if (max_insns == 0) {
1234 max_insns = CF_COUNT_MASK;
1235 }
1236
1237 dc.config = env->config;
1238 dc.singlestep_enabled = env->singlestep_enabled;
1239 dc.tb = tb;
1240 dc.pc = pc_start;
1241 dc.is_jmp = DISAS_NEXT;
1242
1243 init_sar_tracker(&dc);
1244
1245 gen_icount_start();
1246
1247 do {
1248 check_breakpoint(env, &dc);
1249
1250 if (search_pc) {
1251 j = gen_opc_ptr - gen_opc_buf;
1252 if (lj < j) {
1253 lj++;
1254 while (lj < j) {
1255 gen_opc_instr_start[lj++] = 0;
1256 }
1257 }
1258 gen_opc_pc[lj] = dc.pc;
1259 gen_opc_instr_start[lj] = 1;
1260 gen_opc_icount[lj] = insn_count;
1261 }
1262
1263 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
1264 tcg_gen_debug_insn_start(dc.pc);
1265 }
1266
1267 disas_xtensa_insn(&dc);
1268 ++insn_count;
1269 if (env->singlestep_enabled) {
1270 tcg_gen_movi_i32(cpu_pc, dc.pc);
1271 gen_exception(EXCP_DEBUG);
1272 break;
1273 }
1274 } while (dc.is_jmp == DISAS_NEXT &&
1275 insn_count < max_insns &&
1276 dc.pc < next_page_start &&
1277 gen_opc_ptr < gen_opc_end);
1278
1279 reset_sar_tracker(&dc);
1280
1281 if (dc.is_jmp == DISAS_NEXT) {
1282 gen_jumpi(&dc, dc.pc, 0);
1283 }
1284 gen_icount_end(tb, insn_count);
1285 *gen_opc_ptr = INDEX_op_end;
1286
1287 if (!search_pc) {
1288 tb->size = dc.pc - pc_start;
1289 tb->icount = insn_count;
1290 }
1291}
1292
1293void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
1294{
 1295 gen_intermediate_code_internal(env, tb, 0);
1296}
1297
1298void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
1299{
 1300 gen_intermediate_code_internal(env, tb, 1);
1301}
1302
1303void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
1304 int flags)
1305{
1306 int i, j;
1307
1308 cpu_fprintf(f, "PC=%08x\n\n", env->pc);
1309
1310 for (i = j = 0; i < 256; ++i) {
1311 if (sregnames[i]) {
1312 cpu_fprintf(f, "%s=%08x%c", sregnames[i], env->sregs[i],
1313 (j++ % 4) == 3 ? '\n' : ' ');
1314 }
1315 }
1316
1317 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
1318
1319 for (i = j = 0; i < 256; ++i) {
1320 if (uregnames[i]) {
1321 cpu_fprintf(f, "%s=%08x%c", uregnames[i], env->uregs[i],
1322 (j++ % 4) == 3 ? '\n' : ' ');
1323 }
1324 }
 1325
 1326 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
1327
1328 for (i = 0; i < 16; ++i) {
1329 cpu_fprintf(f, "A%02d=%08x%c", i, env->regs[i],
1330 (i % 4) == 3 ? '\n' : ' ');
1331 }
1332}
1333
1334void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
1335{
1336 env->pc = gen_opc_pc[pc_pos];
1337}