]> git.proxmox.com Git - qemu.git/blob - target-xtensa/translate.c
target-xtensa: fetch 3rd opcode byte only when needed
[qemu.git] / target-xtensa / translate.c
1 /*
2 * Xtensa ISA:
3 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
4 *
5 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of the Open Source and Linux Lab nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <stdio.h>
32
33 #include "cpu.h"
34 #include "exec-all.h"
35 #include "disas.h"
36 #include "tcg-op.h"
37 #include "qemu-log.h"
38 #include "sysemu.h"
39
40 #include "helpers.h"
41 #define GEN_HELPER 1
42 #include "helpers.h"
43
/* Per-instruction translation state, threaded through all gen_* helpers. */
typedef struct DisasContext {
    const XtensaConfig *config; /* core configuration being translated */
    TranslationBlock *tb;
    uint32_t pc;                /* address of the insn being translated */
    uint32_t next_pc;           /* address of the following insn */
    int cring;                  /* NOTE(review): ring values — semantics not
                                   visible in this chunk; verify in cpu.h */
    int ring;
    uint32_t lbeg;              /* zero-overhead loop begin address */
    uint32_t lend;              /* zero-overhead loop end address */
    TCGv_i32 litbase;           /* masked LITBASE, valid while translating
                                   (see init_litbase/reset_litbase) */
    int is_jmp;
    int singlestep_enabled;

    /* SAR value tracking, used to pick cheaper shift sequences. */
    bool sar_5bit;              /* SAR holds a plain 5-bit right-shift amount */
    bool sar_m32_5bit;          /* SAR holds 32 - sar_m32, sar_m32 in [0..31] */
    bool sar_m32_allocated;     /* sar_m32 temp has been allocated */
    TCGv_i32 sar_m32;

    uint32_t ccount_delta;      /* insns not yet added to CCOUNT
                                   (flushed by gen_advance_ccount) */
    unsigned used_window;       /* highest window quad already overflow-checked */
} DisasContext;
65
/* TCG globals backing the CPU state fields referenced by generated code. */
static TCGv_ptr cpu_env;
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_R[16];      /* address registers visible in the window */
static TCGv_i32 cpu_SR[256];    /* special registers; only named ones exist */
static TCGv_i32 cpu_UR[256];    /* user registers; only named ones exist */

#include "gen-icount.h"
73
/*
 * Printable names of implemented special registers, indexed by SR number.
 * A NULL entry marks an unimplemented SR: only named entries get a TCG
 * global in xtensa_translate_init(), and gen_rsr()/gen_wsr() log and
 * ignore accesses to unnamed ones.
 */
static const char * const sregnames[256] = {
    [LBEG] = "LBEG",
    [LEND] = "LEND",
    [LCOUNT] = "LCOUNT",
    [SAR] = "SAR",
    [BR] = "BR",
    [LITBASE] = "LITBASE",
    [SCOMPARE1] = "SCOMPARE1",
    [ACCLO] = "ACCLO",
    [ACCHI] = "ACCHI",
    [MR] = "MR0",
    [MR + 1] = "MR1",
    [MR + 2] = "MR2",
    [MR + 3] = "MR3",
    [WINDOW_BASE] = "WINDOW_BASE",
    [WINDOW_START] = "WINDOW_START",
    [PTEVADDR] = "PTEVADDR",
    [RASID] = "RASID",
    [ITLBCFG] = "ITLBCFG",
    [DTLBCFG] = "DTLBCFG",
    [EPC1] = "EPC1",
    [EPC1 + 1] = "EPC2",
    [EPC1 + 2] = "EPC3",
    [EPC1 + 3] = "EPC4",
    [EPC1 + 4] = "EPC5",
    [EPC1 + 5] = "EPC6",
    [EPC1 + 6] = "EPC7",
    [DEPC] = "DEPC",
    [EPS2] = "EPS2",
    [EPS2 + 1] = "EPS3",
    [EPS2 + 2] = "EPS4",
    [EPS2 + 3] = "EPS5",
    [EPS2 + 4] = "EPS6",
    [EPS2 + 5] = "EPS7",
    [EXCSAVE1] = "EXCSAVE1",
    [EXCSAVE1 + 1] = "EXCSAVE2",
    [EXCSAVE1 + 2] = "EXCSAVE3",
    [EXCSAVE1 + 3] = "EXCSAVE4",
    [EXCSAVE1 + 4] = "EXCSAVE5",
    [EXCSAVE1 + 5] = "EXCSAVE6",
    [EXCSAVE1 + 6] = "EXCSAVE7",
    [CPENABLE] = "CPENABLE",
    [INTSET] = "INTSET",
    [INTCLEAR] = "INTCLEAR",
    [INTENABLE] = "INTENABLE",
    [PS] = "PS",
    [VECBASE] = "VECBASE",
    [EXCCAUSE] = "EXCCAUSE",
    [CCOUNT] = "CCOUNT",
    [PRID] = "PRID",
    [EXCVADDR] = "EXCVADDR",
    [CCOMPARE] = "CCOMPARE0",
    [CCOMPARE + 1] = "CCOMPARE1",
    [CCOMPARE + 2] = "CCOMPARE2",
};
129
/* Names of implemented user registers; NULL entries are unimplemented. */
static const char * const uregnames[256] = {
    [THREADPTR] = "THREADPTR",
    [FCR] = "FCR",
    [FSR] = "FSR",
};
135
/*
 * One-time translator setup: allocate TCG globals for the PC, the 16
 * visible address registers, and every implemented special/user register,
 * then register the helper functions (second helpers.h inclusion).
 */
void xtensa_translate_init(void)
{
    static const char * const regnames[] = {
        "ar0", "ar1", "ar2", "ar3",
        "ar4", "ar5", "ar6", "ar7",
        "ar8", "ar9", "ar10", "ar11",
        "ar12", "ar13", "ar14", "ar15",
    };
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
            offsetof(CPUState, pc), "pc");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                offsetof(CPUState, regs[i]),
                regnames[i]);
    }

    /* Only SRs/URs with a name are backed by a TCG global. */
    for (i = 0; i < 256; ++i) {
        if (sregnames[i]) {
            cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
                    offsetof(CPUState, sregs[i]),
                    sregnames[i]);
        }
    }

    for (i = 0; i < 256; ++i) {
        if (uregnames[i]) {
            cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
                    offsetof(CPUState, uregs[i]),
                    uregnames[i]);
        }
    }
#define GEN_HELPER 2
#include "helpers.h"
}
174
175 static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
176 {
177 return xtensa_option_bits_enabled(dc->config, opt);
178 }
179
180 static inline bool option_enabled(DisasContext *dc, int opt)
181 {
182 return xtensa_option_enabled(dc->config, opt);
183 }
184
/*
 * If literal-base addressing is active for this TB, cache the page-aligned
 * LITBASE value in a local temp for the duration of the TB.
 */
static void init_litbase(DisasContext *dc)
{
    if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
        dc->litbase = tcg_temp_local_new_i32();
        tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000);
    }
}
192
193 static void reset_litbase(DisasContext *dc)
194 {
195 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
196 tcg_temp_free(dc->litbase);
197 }
198 }
199
200 static void init_sar_tracker(DisasContext *dc)
201 {
202 dc->sar_5bit = false;
203 dc->sar_m32_5bit = false;
204 dc->sar_m32_allocated = false;
205 }
206
207 static void reset_sar_tracker(DisasContext *dc)
208 {
209 if (dc->sar_m32_allocated) {
210 tcg_temp_free(dc->sar_m32);
211 }
212 }
213
/*
 * SSR/SSA8L-style setup: SAR = sa & 0x1f.
 * Record that SAR now holds a plain 5-bit amount so later shifts can use
 * single-word TCG ops; any stale sar_m32 value is discarded.
 */
static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = true;
    dc->sar_m32_5bit = false;
}
223
/*
 * SSL/SSA8B-style setup: SAR = 32 - (sa & 0x1f).
 * The masked amount is also kept in sar_m32 (lazily allocated, persists
 * across the TB) so 32-bit left shifts can avoid recomputing it.
 */
static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    TCGv_i32 tmp = tcg_const_i32(32);
    if (!dc->sar_m32_allocated) {
        dc->sar_m32 = tcg_temp_local_new_i32();
        dc->sar_m32_allocated = true;
    }
    tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
    tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
    dc->sar_5bit = false;
    dc->sar_m32_5bit = true;
    tcg_temp_free(tmp);
}
237
/*
 * Flush the accumulated instruction count into CCOUNT via the helper.
 * The delta is zeroed first so repeated calls within one insn are no-ops.
 */
static void gen_advance_ccount(DisasContext *dc)
{
    if (dc->ccount_delta > 0) {
        TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta);
        dc->ccount_delta = 0;
        gen_helper_advance_ccount(tmp);
        tcg_temp_free(tmp);
    }
}
247
/* Forget window-overflow checks done so far; next access re-checks. */
static void reset_used_window(DisasContext *dc)
{
    dc->used_window = 0;
}
252
/* Raise a QEMU-level exception (e.g. EXCP_DEBUG), syncing CCOUNT first. */
static void gen_exception(DisasContext *dc, int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_advance_ccount(dc);
    gen_helper_exception(tmp);
    tcg_temp_free(tmp);
}
260
/*
 * Raise an architectural exception with the given EXCCAUSE value at the
 * current PC.  Illegal-instruction and syscall causes always transfer
 * control, so translation of this TB stops there.
 */
static void gen_exception_cause(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_advance_ccount(dc);
    gen_helper_exception_cause(tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
            cause == SYSCALL_CAUSE) {
        dc->is_jmp = DISAS_UPDATE;
    }
}
274
/*
 * Raise an architectural exception that also records a faulting virtual
 * address (load/store exceptions).
 */
static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
        TCGv_i32 vaddr)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_advance_ccount(dc);
    gen_helper_exception_cause_vaddr(tpc, tcause, vaddr);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
}
285
/*
 * For privileged instructions: raise PRIVILEGED_CAUSE unless translating
 * at ring 0 (dc->cring == 0), and stop the TB in that case.
 */
static void gen_check_privilege(DisasContext *dc)
{
    if (dc->cring) {
        gen_exception_cause(dc, PRIVILEGED_CAUSE);
        dc->is_jmp = DISAS_UPDATE;
    }
}
293
/*
 * Transfer control to DEST.  With slot >= 0 the jump may be chained to
 * another TB via goto_tb; slot < 0 forces a full exit.  Under single-step
 * an EXCP_DEBUG is raised instead.  Always ends the TB.
 */
static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
{
    tcg_gen_mov_i32(cpu_pc, dest);
    if (dc->singlestep_enabled) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        gen_advance_ccount(dc);
        if (slot >= 0) {
            tcg_gen_goto_tb(slot);
            tcg_gen_exit_tb((tcg_target_long)dc->tb + slot);
        } else {
            tcg_gen_exit_tb(0);
        }
    }
    dc->is_jmp = DISAS_UPDATE;
}
310
/* Indirect jump: no TB chaining possible, use slot -1. */
static void gen_jump(DisasContext *dc, TCGv dest)
{
    gen_jump_slot(dc, dest, -1);
}
315
/*
 * Direct jump to an immediate address.  TB chaining is only allowed when
 * source and destination share the same guest page.
 */
static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
{
    TCGv_i32 tmp = tcg_const_i32(dest);
    if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
        slot = -1;
    }
    gen_jump_slot(dc, tmp, slot);
    tcg_temp_free(tmp);
}
325
/*
 * Windowed call: record CALLINC in PS, store the return address — with the
 * window increment in its top two bits — into a[callinc * 4], then jump.
 */
static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
        int slot)
{
    TCGv_i32 tcallinc = tcg_const_i32(callinc);

    tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
            tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
    tcg_temp_free(tcallinc);
    tcg_gen_movi_i32(cpu_R[callinc << 2],
            (callinc << 30) | (dc->next_pc & 0x3fffffff));
    gen_jump_slot(dc, dest, slot);
}
338
/* Indirect windowed call: no TB chaining possible, use slot -1. */
static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
{
    gen_callw_slot(dc, callinc, dest, -1);
}
343
344 static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
345 {
346 TCGv_i32 tmp = tcg_const_i32(dest);
347 if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
348 slot = -1;
349 }
350 gen_callw_slot(dc, callinc, tmp, slot);
351 tcg_temp_free(tmp);
352 }
353
/*
 * Zero-overhead loop handling: if the next PC is the loop end (and the
 * LOOP option is enabled and we are not in exception mode), emit code to
 * either decrement LCOUNT and jump back to LBEG, or fall through to the
 * next instruction.  Returns true when the branch was emitted.
 */
static bool gen_check_loop_end(DisasContext *dc, int slot)
{
    if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
            !(dc->tb->flags & XTENSA_TBFLAG_EXCM) &&
            dc->next_pc == dc->lend) {
        int label = gen_new_label();

        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
        tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
        gen_jumpi(dc, dc->lbeg, slot);
        gen_set_label(label);
        gen_jumpi(dc, dc->next_pc, -1);
        return true;
    }
    return false;
}
370
371 static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
372 {
373 if (!gen_check_loop_end(dc, slot)) {
374 gen_jumpi(dc, dc->next_pc, slot);
375 }
376 }
377
/*
 * Conditional branch: taken path goes to pc + offset (chain slot 1),
 * not-taken path falls through to the next insn via the loop-end check
 * (chain slot 0).
 */
static void gen_brcond(DisasContext *dc, TCGCond cond,
        TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
{
    int label = gen_new_label();

    tcg_gen_brcond_i32(cond, t0, t1, label);
    gen_jumpi_check_loop_end(dc, 0);
    gen_set_label(label);
    gen_jumpi(dc, dc->pc + offset, 1);
}
388
389 static void gen_brcondi(DisasContext *dc, TCGCond cond,
390 TCGv_i32 t0, uint32_t t1, uint32_t offset)
391 {
392 TCGv_i32 tmp = tcg_const_i32(t1);
393 gen_brcond(dc, cond, t0, tmp, offset);
394 tcg_temp_free(tmp);
395 }
396
/* RSR CCOUNT: bring CCOUNT up to date before reading it. */
static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    gen_advance_ccount(dc);
    tcg_gen_mov_i32(d, cpu_SR[sr]);
}
402
/*
 * RSR PTEVADDR: combine the faulting VPN (EXCVADDR >> 10) with the
 * page-table base held in PTEVADDR, word-aligned.
 */
static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
    tcg_gen_or_i32(d, d, cpu_SR[sr]);
    tcg_gen_andi_i32(d, d, 0xfffffffc);
}
409
/*
 * Read special register SR into D.  SRs with side effects dispatch
 * through rsr_handler[]; all other implemented SRs are a plain move.
 * Unimplemented SRs are logged and leave D unchanged.
 */
static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    static void (* const rsr_handler[256])(DisasContext *dc,
            TCGv_i32 d, uint32_t sr) = {
        [CCOUNT] = gen_rsr_ccount,
        [PTEVADDR] = gen_rsr_ptevaddr,
    };

    if (sregnames[sr]) {
        if (rsr_handler[sr]) {
            rsr_handler[sr](dc, d, sr);
        } else {
            tcg_gen_mov_i32(d, cpu_SR[sr]);
        }
    } else {
        qemu_log("RSR %d not implemented, ", sr);
    }
}
428
/* WSR LBEG: handled by a helper (loop state needs extra bookkeeping). */
static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    gen_helper_wsr_lbeg(s);
}
433
/* WSR LEND: handled by a helper (loop state needs extra bookkeeping). */
static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    gen_helper_wsr_lend(s);
}
438
/*
 * WSR SAR: keep the low 6 bits.  A direct write invalidates the
 * translator's SAR tracking, so both 5-bit hints are cleared.
 */
static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
}
448
/* WSR BR: only the 16 boolean bits are writable. */
static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
}
453
/* WSR LITBASE: keep base (top 20 bits) and enable bit (bit 0). */
static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
460
/* WSR ACCHI: sign-extend the low 8 bits of the MAC16 accumulator high. */
static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_ext8s_i32(cpu_SR[sr], s);
}
465
/*
 * WSR WINDOW_BASE: rotating the window remaps cpu_R, so a helper does it
 * and the cached overflow-check state is discarded.
 */
static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_windowbase(v);
    reset_used_window(dc);
}
471
/* WSR WINDOW_START: one valid bit per 4-register window quad. */
static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
    reset_used_window(dc);
}
477
/* WSR PTEVADDR: only the page-table base (top 10 bits) is writable. */
static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
}
482
/* WSR RASID: changing ASIDs affects translation, so helper + TB exit. */
static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_rasid(v);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
489
/* WSR ITLBCFG/DTLBCFG: keep only the defined way-configuration bits. */
static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
}
494
/*
 * WSR INTSET: only software interrupts may be raised by writing here.
 * Interrupts are re-evaluated and the TB ends so they can be taken.
 */
static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v,
            dc->config->inttype_mask[INTTYPE_SOFTWARE]);
    gen_helper_check_interrupts(cpu_env);
    gen_jumpi_check_loop_end(dc, 0);
}
502
/*
 * WSR INTCLEAR: clear the requested edge/NMI/software interrupt bits
 * from INTSET, then re-evaluate pending interrupts.
 */
static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp, v,
            dc->config->inttype_mask[INTTYPE_EDGE] |
            dc->config->inttype_mask[INTTYPE_NMI] |
            dc->config->inttype_mask[INTTYPE_SOFTWARE]);
    tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp);
    tcg_temp_free(tmp);
    gen_helper_check_interrupts(cpu_env);
}
515
/* WSR INTENABLE: may unmask a pending interrupt — re-check and end TB. */
static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_mov_i32(cpu_SR[sr], v);
    gen_helper_check_interrupts(cpu_env);
    gen_jumpi_check_loop_end(dc, 0);
}
522
/*
 * WSR PS: mask to the defined PS fields (RING only with the MMU option),
 * invalidate cached window-check state, re-evaluate interrupts, and end
 * the TB since the mmu index / tb->flags may have changed.
 */
static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
        PS_UM | PS_EXCM | PS_INTLEVEL;

    if (option_enabled(dc, XTENSA_OPTION_MMU)) {
        mask |= PS_RING;
    }
    tcg_gen_andi_i32(cpu_SR[sr], v, mask);
    reset_used_window(dc);
    gen_helper_check_interrupts(cpu_env);
    /* This can change mmu index and tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
537
/* WSR PRID: processor ID is read-only; writes are silently ignored. */
static void gen_wsr_prid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
}
541
/*
 * WSR CCOMPAREn: writing a compare value clears the corresponding timer
 * interrupt and re-evaluates pending interrupts.  Writes to compare
 * registers the core does not have are ignored.
 */
static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t id = sr - CCOMPARE;
    if (id < dc->config->nccompare) {
        uint32_t int_bit = 1 << dc->config->timerint[id];
        gen_advance_ccount(dc);
        tcg_gen_mov_i32(cpu_SR[sr], v);
        tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
        gen_helper_check_interrupts(cpu_env);
    }
}
553
/*
 * Write S to special register SR.  SRs with masking or side effects
 * dispatch through wsr_handler[]; other implemented SRs are a plain
 * move.  Unimplemented SRs are logged and the write is dropped.
 */
static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    static void (* const wsr_handler[256])(DisasContext *dc,
            uint32_t sr, TCGv_i32 v) = {
        [LBEG] = gen_wsr_lbeg,
        [LEND] = gen_wsr_lend,
        [SAR] = gen_wsr_sar,
        [BR] = gen_wsr_br,
        [LITBASE] = gen_wsr_litbase,
        [ACCHI] = gen_wsr_acchi,
        [WINDOW_BASE] = gen_wsr_windowbase,
        [WINDOW_START] = gen_wsr_windowstart,
        [PTEVADDR] = gen_wsr_ptevaddr,
        [RASID] = gen_wsr_rasid,
        [ITLBCFG] = gen_wsr_tlbcfg,
        [DTLBCFG] = gen_wsr_tlbcfg,
        [INTSET] = gen_wsr_intset,
        [INTCLEAR] = gen_wsr_intclear,
        [INTENABLE] = gen_wsr_intenable,
        [PS] = gen_wsr_ps,
        [PRID] = gen_wsr_prid,
        [CCOMPARE] = gen_wsr_ccompare,
        [CCOMPARE + 1] = gen_wsr_ccompare,
        [CCOMPARE + 2] = gen_wsr_ccompare,
    };

    if (sregnames[sr]) {
        if (wsr_handler[sr]) {
            wsr_handler[sr](dc, sr, s);
        } else {
            tcg_gen_mov_i32(cpu_SR[sr], s);
        }
    } else {
        qemu_log("WSR %d not implemented, ", sr);
    }
}
590
/*
 * Enforce alignment of ADDR to a (1 << shift)-byte boundary.
 * Without the unaligned-exception option the low bits are silently
 * dropped; with the HW-alignment option, instructions that forbid
 * hardware alignment (no_hw_alignment) get an explicit runtime check
 * that raises LOAD_STORE_ALIGNMENT_CAUSE on a misaligned address.
 */
static void gen_load_store_alignment(DisasContext *dc, int shift,
        TCGv_i32 addr, bool no_hw_alignment)
{
    if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
        tcg_gen_andi_i32(addr, addr, ~0 << shift);
    } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
            no_hw_alignment) {
        int label = gen_new_label();
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
        gen_set_label(label);
        tcg_temp_free(tmp);
    }
}
607
/*
 * WAITI: sync CCOUNT, then call the helper with the resume PC and the
 * new interrupt level to halt until an interrupt arrives.
 */
static void gen_waiti(DisasContext *dc, uint32_t imm4)
{
    TCGv_i32 pc = tcg_const_i32(dc->next_pc);
    TCGv_i32 intlevel = tcg_const_i32(imm4);
    gen_advance_ccount(dc);
    gen_helper_waiti(pc, intlevel);
    tcg_temp_free(pc);
    tcg_temp_free(intlevel);
}
617
/*
 * Emit a window-overflow check covering register a[r1].  The check is
 * skipped in exception mode, and only emitted when r1 lies in a higher
 * window quad than any register already checked in this TB.
 */
static void gen_window_check1(DisasContext *dc, unsigned r1)
{
    if (dc->tb->flags & XTENSA_TBFLAG_EXCM) {
        return;
    }
    if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) &&
            r1 / 4 > dc->used_window) {
        TCGv_i32 pc = tcg_const_i32(dc->pc);
        TCGv_i32 w = tcg_const_i32(r1 / 4);

        dc->used_window = r1 / 4;
        gen_advance_ccount(dc);
        gen_helper_window_check(pc, w);

        tcg_temp_free(w);
        tcg_temp_free(pc);
    }
}
636
637 static void gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2)
638 {
639 gen_window_check1(dc, r1 > r2 ? r1 : r2);
640 }
641
642 static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
643 unsigned r3)
644 {
645 gen_window_check2(dc, r1, r2 > r3 ? r2 : r3);
646 }
647
648 static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
649 {
650 TCGv_i32 m = tcg_temp_new_i32();
651
652 if (hi) {
653 (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
654 } else {
655 (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
656 }
657 return m;
658 }
659
660 static void disas_xtensa_insn(DisasContext *dc)
661 {
662 #define HAS_OPTION_BITS(opt) do { \
663 if (!option_bits_enabled(dc, opt)) { \
664 qemu_log("Option is not enabled %s:%d\n", \
665 __FILE__, __LINE__); \
666 goto invalid_opcode; \
667 } \
668 } while (0)
669
670 #define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
671
672 #define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
673 #define RESERVED() do { \
674 qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
675 dc->pc, b0, b1, b2, __FILE__, __LINE__); \
676 goto invalid_opcode; \
677 } while (0)
678
679
680 #ifdef TARGET_WORDS_BIGENDIAN
681 #define OP0 (((b0) & 0xf0) >> 4)
682 #define OP1 (((b2) & 0xf0) >> 4)
683 #define OP2 ((b2) & 0xf)
684 #define RRR_R ((b1) & 0xf)
685 #define RRR_S (((b1) & 0xf0) >> 4)
686 #define RRR_T ((b0) & 0xf)
687 #else
688 #define OP0 (((b0) & 0xf))
689 #define OP1 (((b2) & 0xf))
690 #define OP2 (((b2) & 0xf0) >> 4)
691 #define RRR_R (((b1) & 0xf0) >> 4)
692 #define RRR_S (((b1) & 0xf))
693 #define RRR_T (((b0) & 0xf0) >> 4)
694 #endif
695 #define RRR_X ((RRR_R & 0x4) >> 2)
696 #define RRR_Y ((RRR_T & 0x4) >> 2)
697 #define RRR_W (RRR_R & 0x3)
698
699 #define RRRN_R RRR_R
700 #define RRRN_S RRR_S
701 #define RRRN_T RRR_T
702
703 #define RRI8_R RRR_R
704 #define RRI8_S RRR_S
705 #define RRI8_T RRR_T
706 #define RRI8_IMM8 (b2)
707 #define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8)
708
709 #ifdef TARGET_WORDS_BIGENDIAN
710 #define RI16_IMM16 (((b1) << 8) | (b2))
711 #else
712 #define RI16_IMM16 (((b2) << 8) | (b1))
713 #endif
714
715 #ifdef TARGET_WORDS_BIGENDIAN
716 #define CALL_N (((b0) & 0xc) >> 2)
717 #define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2))
718 #else
719 #define CALL_N (((b0) & 0x30) >> 4)
720 #define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10))
721 #endif
722 #define CALL_OFFSET_SE \
723 (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET)
724
725 #define CALLX_N CALL_N
726 #ifdef TARGET_WORDS_BIGENDIAN
727 #define CALLX_M ((b0) & 0x3)
728 #else
729 #define CALLX_M (((b0) & 0xc0) >> 6)
730 #endif
731 #define CALLX_S RRR_S
732
733 #define BRI12_M CALLX_M
734 #define BRI12_S RRR_S
735 #ifdef TARGET_WORDS_BIGENDIAN
736 #define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2))
737 #else
738 #define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4))
739 #endif
740 #define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12)
741
742 #define BRI8_M BRI12_M
743 #define BRI8_R RRI8_R
744 #define BRI8_S RRI8_S
745 #define BRI8_IMM8 RRI8_IMM8
746 #define BRI8_IMM8_SE RRI8_IMM8_SE
747
748 #define RSR_SR (b1)
749
750 uint8_t b0 = ldub_code(dc->pc);
751 uint8_t b1 = ldub_code(dc->pc + 1);
752 uint8_t b2 = 0;
753
754 static const uint32_t B4CONST[] = {
755 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
756 };
757
758 static const uint32_t B4CONSTU[] = {
759 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
760 };
761
762 if (OP0 >= 8) {
763 dc->next_pc = dc->pc + 2;
764 HAS_OPTION(XTENSA_OPTION_CODE_DENSITY);
765 } else {
766 dc->next_pc = dc->pc + 3;
767 b2 = ldub_code(dc->pc + 2);
768 }
769
770 switch (OP0) {
771 case 0: /*QRST*/
772 switch (OP1) {
773 case 0: /*RST0*/
774 switch (OP2) {
775 case 0: /*ST0*/
776 if ((RRR_R & 0xc) == 0x8) {
777 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
778 }
779
780 switch (RRR_R) {
781 case 0: /*SNM0*/
782 switch (CALLX_M) {
783 case 0: /*ILL*/
784 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
785 break;
786
787 case 1: /*reserved*/
788 RESERVED();
789 break;
790
791 case 2: /*JR*/
792 switch (CALLX_N) {
793 case 0: /*RET*/
794 case 2: /*JX*/
795 gen_window_check1(dc, CALLX_S);
796 gen_jump(dc, cpu_R[CALLX_S]);
797 break;
798
799 case 1: /*RETWw*/
800 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
801 {
802 TCGv_i32 tmp = tcg_const_i32(dc->pc);
803 gen_advance_ccount(dc);
804 gen_helper_retw(tmp, tmp);
805 gen_jump(dc, tmp);
806 tcg_temp_free(tmp);
807 }
808 break;
809
810 case 3: /*reserved*/
811 RESERVED();
812 break;
813 }
814 break;
815
816 case 3: /*CALLX*/
817 gen_window_check2(dc, CALLX_S, CALLX_N << 2);
818 switch (CALLX_N) {
819 case 0: /*CALLX0*/
820 {
821 TCGv_i32 tmp = tcg_temp_new_i32();
822 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
823 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
824 gen_jump(dc, tmp);
825 tcg_temp_free(tmp);
826 }
827 break;
828
829 case 1: /*CALLX4w*/
830 case 2: /*CALLX8w*/
831 case 3: /*CALLX12w*/
832 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
833 {
834 TCGv_i32 tmp = tcg_temp_new_i32();
835
836 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
837 gen_callw(dc, CALLX_N, tmp);
838 tcg_temp_free(tmp);
839 }
840 break;
841 }
842 break;
843 }
844 break;
845
846 case 1: /*MOVSPw*/
847 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
848 gen_window_check2(dc, RRR_T, RRR_S);
849 {
850 TCGv_i32 pc = tcg_const_i32(dc->pc);
851 gen_advance_ccount(dc);
852 gen_helper_movsp(pc);
853 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]);
854 tcg_temp_free(pc);
855 }
856 break;
857
858 case 2: /*SYNC*/
859 switch (RRR_T) {
860 case 0: /*ISYNC*/
861 break;
862
863 case 1: /*RSYNC*/
864 break;
865
866 case 2: /*ESYNC*/
867 break;
868
869 case 3: /*DSYNC*/
870 break;
871
872 case 8: /*EXCW*/
873 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
874 break;
875
876 case 12: /*MEMW*/
877 break;
878
879 case 13: /*EXTW*/
880 break;
881
882 case 15: /*NOP*/
883 break;
884
885 default: /*reserved*/
886 RESERVED();
887 break;
888 }
889 break;
890
891 case 3: /*RFEIx*/
892 switch (RRR_T) {
893 case 0: /*RFETx*/
894 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
895 switch (RRR_S) {
896 case 0: /*RFEx*/
897 gen_check_privilege(dc);
898 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
899 gen_helper_check_interrupts(cpu_env);
900 gen_jump(dc, cpu_SR[EPC1]);
901 break;
902
903 case 1: /*RFUEx*/
904 RESERVED();
905 break;
906
907 case 2: /*RFDEx*/
908 gen_check_privilege(dc);
909 gen_jump(dc, cpu_SR[
910 dc->config->ndepc ? DEPC : EPC1]);
911 break;
912
913 case 4: /*RFWOw*/
914 case 5: /*RFWUw*/
915 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
916 gen_check_privilege(dc);
917 {
918 TCGv_i32 tmp = tcg_const_i32(1);
919
920 tcg_gen_andi_i32(
921 cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
922 tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
923
924 if (RRR_S == 4) {
925 tcg_gen_andc_i32(cpu_SR[WINDOW_START],
926 cpu_SR[WINDOW_START], tmp);
927 } else {
928 tcg_gen_or_i32(cpu_SR[WINDOW_START],
929 cpu_SR[WINDOW_START], tmp);
930 }
931
932 gen_helper_restore_owb();
933 gen_helper_check_interrupts(cpu_env);
934 gen_jump(dc, cpu_SR[EPC1]);
935
936 tcg_temp_free(tmp);
937 }
938 break;
939
940 default: /*reserved*/
941 RESERVED();
942 break;
943 }
944 break;
945
946 case 1: /*RFIx*/
947 HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT);
948 if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) {
949 gen_check_privilege(dc);
950 tcg_gen_mov_i32(cpu_SR[PS],
951 cpu_SR[EPS2 + RRR_S - 2]);
952 gen_helper_check_interrupts(cpu_env);
953 gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
954 } else {
955 qemu_log("RFI %d is illegal\n", RRR_S);
956 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
957 }
958 break;
959
960 case 2: /*RFME*/
961 TBD();
962 break;
963
964 default: /*reserved*/
965 RESERVED();
966 break;
967
968 }
969 break;
970
971 case 4: /*BREAKx*/
972 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
973 TBD();
974 break;
975
976 case 5: /*SYSCALLx*/
977 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
978 switch (RRR_S) {
979 case 0: /*SYSCALLx*/
980 gen_exception_cause(dc, SYSCALL_CAUSE);
981 break;
982
983 case 1: /*SIMCALL*/
984 if (semihosting_enabled) {
985 gen_check_privilege(dc);
986 gen_helper_simcall(cpu_env);
987 } else {
988 qemu_log("SIMCALL but semihosting is disabled\n");
989 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
990 }
991 break;
992
993 default:
994 RESERVED();
995 break;
996 }
997 break;
998
999 case 6: /*RSILx*/
1000 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1001 gen_check_privilege(dc);
1002 gen_window_check1(dc, RRR_T);
1003 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]);
1004 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
1005 tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S);
1006 gen_helper_check_interrupts(cpu_env);
1007 gen_jumpi_check_loop_end(dc, 0);
1008 break;
1009
1010 case 7: /*WAITIx*/
1011 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1012 gen_check_privilege(dc);
1013 gen_waiti(dc, RRR_S);
1014 break;
1015
1016 case 8: /*ANY4p*/
1017 case 9: /*ALL4p*/
1018 case 10: /*ANY8p*/
1019 case 11: /*ALL8p*/
1020 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1021 {
1022 const unsigned shift = (RRR_R & 2) ? 8 : 4;
1023 TCGv_i32 mask = tcg_const_i32(
1024 ((1 << shift) - 1) << RRR_S);
1025 TCGv_i32 tmp = tcg_temp_new_i32();
1026
1027 tcg_gen_and_i32(tmp, cpu_SR[BR], mask);
1028 if (RRR_R & 1) { /*ALL*/
1029 tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S);
1030 } else { /*ANY*/
1031 tcg_gen_add_i32(tmp, tmp, mask);
1032 }
1033 tcg_gen_shri_i32(tmp, tmp, RRR_S + shift);
1034 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR],
1035 tmp, RRR_T, 1);
1036 tcg_temp_free(mask);
1037 tcg_temp_free(tmp);
1038 }
1039 break;
1040
1041 default: /*reserved*/
1042 RESERVED();
1043 break;
1044
1045 }
1046 break;
1047
1048 case 1: /*AND*/
1049 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1050 tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1051 break;
1052
1053 case 2: /*OR*/
1054 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1055 tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1056 break;
1057
1058 case 3: /*XOR*/
1059 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1060 tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1061 break;
1062
1063 case 4: /*ST1*/
1064 switch (RRR_R) {
1065 case 0: /*SSR*/
1066 gen_window_check1(dc, RRR_S);
1067 gen_right_shift_sar(dc, cpu_R[RRR_S]);
1068 break;
1069
1070 case 1: /*SSL*/
1071 gen_window_check1(dc, RRR_S);
1072 gen_left_shift_sar(dc, cpu_R[RRR_S]);
1073 break;
1074
1075 case 2: /*SSA8L*/
1076 gen_window_check1(dc, RRR_S);
1077 {
1078 TCGv_i32 tmp = tcg_temp_new_i32();
1079 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1080 gen_right_shift_sar(dc, tmp);
1081 tcg_temp_free(tmp);
1082 }
1083 break;
1084
1085 case 3: /*SSA8B*/
1086 gen_window_check1(dc, RRR_S);
1087 {
1088 TCGv_i32 tmp = tcg_temp_new_i32();
1089 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1090 gen_left_shift_sar(dc, tmp);
1091 tcg_temp_free(tmp);
1092 }
1093 break;
1094
1095 case 4: /*SSAI*/
1096 {
1097 TCGv_i32 tmp = tcg_const_i32(
1098 RRR_S | ((RRR_T & 1) << 4));
1099 gen_right_shift_sar(dc, tmp);
1100 tcg_temp_free(tmp);
1101 }
1102 break;
1103
1104 case 6: /*RER*/
1105 TBD();
1106 break;
1107
1108 case 7: /*WER*/
1109 TBD();
1110 break;
1111
1112 case 8: /*ROTWw*/
1113 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1114 gen_check_privilege(dc);
1115 {
1116 TCGv_i32 tmp = tcg_const_i32(
1117 RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0));
1118 gen_helper_rotw(tmp);
1119 tcg_temp_free(tmp);
1120 reset_used_window(dc);
1121 }
1122 break;
1123
1124 case 14: /*NSAu*/
1125 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1126 gen_window_check2(dc, RRR_S, RRR_T);
1127 gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
1128 break;
1129
1130 case 15: /*NSAUu*/
1131 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1132 gen_window_check2(dc, RRR_S, RRR_T);
1133 gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
1134 break;
1135
1136 default: /*reserved*/
1137 RESERVED();
1138 break;
1139 }
1140 break;
1141
1142 case 5: /*TLB*/
1143 HAS_OPTION_BITS(
1144 XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
1145 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
1146 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION));
1147 gen_check_privilege(dc);
1148 gen_window_check2(dc, RRR_S, RRR_T);
1149 {
1150 TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0);
1151
1152 switch (RRR_R & 7) {
1153 case 3: /*RITLB0*/ /*RDTLB0*/
1154 gen_helper_rtlb0(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1155 break;
1156
1157 case 4: /*IITLB*/ /*IDTLB*/
1158 gen_helper_itlb(cpu_R[RRR_S], dtlb);
1159 /* This could change memory mapping, so exit tb */
1160 gen_jumpi_check_loop_end(dc, -1);
1161 break;
1162
1163 case 5: /*PITLB*/ /*PDTLB*/
1164 tcg_gen_movi_i32(cpu_pc, dc->pc);
1165 gen_helper_ptlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1166 break;
1167
1168 case 6: /*WITLB*/ /*WDTLB*/
1169 gen_helper_wtlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1170 /* This could change memory mapping, so exit tb */
1171 gen_jumpi_check_loop_end(dc, -1);
1172 break;
1173
1174 case 7: /*RITLB1*/ /*RDTLB1*/
1175 gen_helper_rtlb1(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1176 break;
1177
1178 default:
1179 tcg_temp_free(dtlb);
1180 RESERVED();
1181 break;
1182 }
1183 tcg_temp_free(dtlb);
1184 }
1185 break;
1186
1187 case 6: /*RT0*/
1188 gen_window_check2(dc, RRR_R, RRR_T);
1189 switch (RRR_S) {
1190 case 0: /*NEG*/
1191 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1192 break;
1193
1194 case 1: /*ABS*/
1195 {
1196 int label = gen_new_label();
1197 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1198 tcg_gen_brcondi_i32(
1199 TCG_COND_GE, cpu_R[RRR_R], 0, label);
1200 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1201 gen_set_label(label);
1202 }
1203 break;
1204
1205 default: /*reserved*/
1206 RESERVED();
1207 break;
1208 }
1209 break;
1210
1211 case 7: /*reserved*/
1212 RESERVED();
1213 break;
1214
1215 case 8: /*ADD*/
1216 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1217 tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1218 break;
1219
1220 case 9: /*ADD**/
1221 case 10:
1222 case 11:
1223 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1224 {
1225 TCGv_i32 tmp = tcg_temp_new_i32();
1226 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
1227 tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1228 tcg_temp_free(tmp);
1229 }
1230 break;
1231
1232 case 12: /*SUB*/
1233 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1234 tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1235 break;
1236
1237 case 13: /*SUB**/
1238 case 14:
1239 case 15:
1240 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1241 {
1242 TCGv_i32 tmp = tcg_temp_new_i32();
1243 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
1244 tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1245 tcg_temp_free(tmp);
1246 }
1247 break;
1248 }
1249 break;
1250
1251 case 1: /*RST1*/
1252 switch (OP2) {
1253 case 0: /*SLLI*/
1254 case 1:
1255 gen_window_check2(dc, RRR_R, RRR_S);
1256 tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S],
1257 32 - (RRR_T | ((OP2 & 1) << 4)));
1258 break;
1259
1260 case 2: /*SRAI*/
1261 case 3:
1262 gen_window_check2(dc, RRR_R, RRR_T);
1263 tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T],
1264 RRR_S | ((OP2 & 1) << 4));
1265 break;
1266
1267 case 4: /*SRLI*/
1268 gen_window_check2(dc, RRR_R, RRR_T);
1269 tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S);
1270 break;
1271
1272 case 6: /*XSR*/
1273 {
1274 TCGv_i32 tmp = tcg_temp_new_i32();
1275 if (RSR_SR >= 64) {
1276 gen_check_privilege(dc);
1277 }
1278 gen_window_check1(dc, RRR_T);
1279 tcg_gen_mov_i32(tmp, cpu_R[RRR_T]);
1280 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1281 gen_wsr(dc, RSR_SR, tmp);
1282 tcg_temp_free(tmp);
1283 if (!sregnames[RSR_SR]) {
1284 TBD();
1285 }
1286 }
1287 break;
1288
1289 /*
1290 * Note: 64 bit ops are used here solely because SAR values
1291 * have range 0..63
1292 */
1293 #define gen_shift_reg(cmd, reg) do { \
1294 TCGv_i64 tmp = tcg_temp_new_i64(); \
1295 tcg_gen_extu_i32_i64(tmp, reg); \
1296 tcg_gen_##cmd##_i64(v, v, tmp); \
1297 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
1298 tcg_temp_free_i64(v); \
1299 tcg_temp_free_i64(tmp); \
1300 } while (0)
1301
1302 #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
1303
1304 case 8: /*SRC*/
1305 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1306 {
1307 TCGv_i64 v = tcg_temp_new_i64();
1308 tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]);
1309 gen_shift(shr);
1310 }
1311 break;
1312
1313 case 9: /*SRL*/
1314 gen_window_check2(dc, RRR_R, RRR_T);
1315 if (dc->sar_5bit) {
1316 tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1317 } else {
1318 TCGv_i64 v = tcg_temp_new_i64();
1319 tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]);
1320 gen_shift(shr);
1321 }
1322 break;
1323
1324 case 10: /*SLL*/
1325 gen_window_check2(dc, RRR_R, RRR_S);
1326 if (dc->sar_m32_5bit) {
1327 tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32);
1328 } else {
1329 TCGv_i64 v = tcg_temp_new_i64();
1330 TCGv_i32 s = tcg_const_i32(32);
1331 tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
1332 tcg_gen_andi_i32(s, s, 0x3f);
1333 tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]);
1334 gen_shift_reg(shl, s);
1335 tcg_temp_free(s);
1336 }
1337 break;
1338
1339 case 11: /*SRA*/
1340 gen_window_check2(dc, RRR_R, RRR_T);
1341 if (dc->sar_5bit) {
1342 tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1343 } else {
1344 TCGv_i64 v = tcg_temp_new_i64();
1345 tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]);
1346 gen_shift(sar);
1347 }
1348 break;
1349 #undef gen_shift
1350 #undef gen_shift_reg
1351
1352 case 12: /*MUL16U*/
1353 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1354 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1355 {
1356 TCGv_i32 v1 = tcg_temp_new_i32();
1357 TCGv_i32 v2 = tcg_temp_new_i32();
1358 tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]);
1359 tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]);
1360 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1361 tcg_temp_free(v2);
1362 tcg_temp_free(v1);
1363 }
1364 break;
1365
1366 case 13: /*MUL16S*/
1367 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1368 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1369 {
1370 TCGv_i32 v1 = tcg_temp_new_i32();
1371 TCGv_i32 v2 = tcg_temp_new_i32();
1372 tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]);
1373 tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]);
1374 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1375 tcg_temp_free(v2);
1376 tcg_temp_free(v1);
1377 }
1378 break;
1379
1380 default: /*reserved*/
1381 RESERVED();
1382 break;
1383 }
1384 break;
1385
1386 case 2: /*RST2*/
1387 if (OP2 >= 8) {
1388 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1389 }
1390
1391 if (OP2 >= 12) {
1392 HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV);
1393 int label = gen_new_label();
1394 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
1395 gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
1396 gen_set_label(label);
1397 }
1398
1399 switch (OP2) {
1400 #define BOOLEAN_LOGIC(fn, r, s, t) \
1401 do { \
1402 HAS_OPTION(XTENSA_OPTION_BOOLEAN); \
1403 TCGv_i32 tmp1 = tcg_temp_new_i32(); \
1404 TCGv_i32 tmp2 = tcg_temp_new_i32(); \
1405 \
1406 tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \
1407 tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \
1408 tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \
1409 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \
1410 tcg_temp_free(tmp1); \
1411 tcg_temp_free(tmp2); \
1412 } while (0)
1413
1414 case 0: /*ANDBp*/
1415 BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T);
1416 break;
1417
1418 case 1: /*ANDBCp*/
1419 BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T);
1420 break;
1421
1422 case 2: /*ORBp*/
1423 BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T);
1424 break;
1425
1426 case 3: /*ORBCp*/
1427 BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T);
1428 break;
1429
1430 case 4: /*XORBp*/
1431 BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T);
1432 break;
1433
1434 #undef BOOLEAN_LOGIC
1435
1436 case 8: /*MULLi*/
1437 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL);
1438 tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1439 break;
1440
1441 case 10: /*MULUHi*/
1442 case 11: /*MULSHi*/
1443 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH);
1444 {
1445 TCGv_i64 r = tcg_temp_new_i64();
1446 TCGv_i64 s = tcg_temp_new_i64();
1447 TCGv_i64 t = tcg_temp_new_i64();
1448
1449 if (OP2 == 10) {
1450 tcg_gen_extu_i32_i64(s, cpu_R[RRR_S]);
1451 tcg_gen_extu_i32_i64(t, cpu_R[RRR_T]);
1452 } else {
1453 tcg_gen_ext_i32_i64(s, cpu_R[RRR_S]);
1454 tcg_gen_ext_i32_i64(t, cpu_R[RRR_T]);
1455 }
1456 tcg_gen_mul_i64(r, s, t);
1457 tcg_gen_shri_i64(r, r, 32);
1458 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], r);
1459
1460 tcg_temp_free_i64(r);
1461 tcg_temp_free_i64(s);
1462 tcg_temp_free_i64(t);
1463 }
1464 break;
1465
1466 case 12: /*QUOUi*/
1467 tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1468 break;
1469
1470 case 13: /*QUOSi*/
1471 case 15: /*REMSi*/
1472 {
1473 int label1 = gen_new_label();
1474 int label2 = gen_new_label();
1475
1476 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
1477 label1);
1478 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
1479 label1);
1480 tcg_gen_movi_i32(cpu_R[RRR_R],
1481 OP2 == 13 ? 0x80000000 : 0);
1482 tcg_gen_br(label2);
1483 gen_set_label(label1);
1484 if (OP2 == 13) {
1485 tcg_gen_div_i32(cpu_R[RRR_R],
1486 cpu_R[RRR_S], cpu_R[RRR_T]);
1487 } else {
1488 tcg_gen_rem_i32(cpu_R[RRR_R],
1489 cpu_R[RRR_S], cpu_R[RRR_T]);
1490 }
1491 gen_set_label(label2);
1492 }
1493 break;
1494
1495 case 14: /*REMUi*/
1496 tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1497 break;
1498
1499 default: /*reserved*/
1500 RESERVED();
1501 break;
1502 }
1503 break;
1504
1505 case 3: /*RST3*/
1506 switch (OP2) {
1507 case 0: /*RSR*/
1508 if (RSR_SR >= 64) {
1509 gen_check_privilege(dc);
1510 }
1511 gen_window_check1(dc, RRR_T);
1512 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1513 if (!sregnames[RSR_SR]) {
1514 TBD();
1515 }
1516 break;
1517
1518 case 1: /*WSR*/
1519 if (RSR_SR >= 64) {
1520 gen_check_privilege(dc);
1521 }
1522 gen_window_check1(dc, RRR_T);
1523 gen_wsr(dc, RSR_SR, cpu_R[RRR_T]);
1524 if (!sregnames[RSR_SR]) {
1525 TBD();
1526 }
1527 break;
1528
1529 case 2: /*SEXTu*/
1530 HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT);
1531 gen_window_check2(dc, RRR_R, RRR_S);
1532 {
1533 int shift = 24 - RRR_T;
1534
1535 if (shift == 24) {
1536 tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1537 } else if (shift == 16) {
1538 tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1539 } else {
1540 TCGv_i32 tmp = tcg_temp_new_i32();
1541 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift);
1542 tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift);
1543 tcg_temp_free(tmp);
1544 }
1545 }
1546 break;
1547
1548 case 3: /*CLAMPSu*/
1549 HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS);
1550 gen_window_check2(dc, RRR_R, RRR_S);
1551 {
1552 TCGv_i32 tmp1 = tcg_temp_new_i32();
1553 TCGv_i32 tmp2 = tcg_temp_new_i32();
1554 int label = gen_new_label();
1555
1556 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
1557 tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]);
1558 tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7));
1559 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1560 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp2, 0, label);
1561
1562 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
1563 tcg_gen_xori_i32(cpu_R[RRR_R], tmp1,
1564 0xffffffff >> (25 - RRR_T));
1565
1566 gen_set_label(label);
1567
1568 tcg_temp_free(tmp1);
1569 tcg_temp_free(tmp2);
1570 }
1571 break;
1572
1573 case 4: /*MINu*/
1574 case 5: /*MAXu*/
1575 case 6: /*MINUu*/
1576 case 7: /*MAXUu*/
1577 HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX);
1578 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1579 {
1580 static const TCGCond cond[] = {
1581 TCG_COND_LE,
1582 TCG_COND_GE,
1583 TCG_COND_LEU,
1584 TCG_COND_GEU
1585 };
1586 int label = gen_new_label();
1587
1588 if (RRR_R != RRR_T) {
1589 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1590 tcg_gen_brcond_i32(cond[OP2 - 4],
1591 cpu_R[RRR_S], cpu_R[RRR_T], label);
1592 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1593 } else {
1594 tcg_gen_brcond_i32(cond[OP2 - 4],
1595 cpu_R[RRR_T], cpu_R[RRR_S], label);
1596 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1597 }
1598 gen_set_label(label);
1599 }
1600 break;
1601
1602 case 8: /*MOVEQZ*/
1603 case 9: /*MOVNEZ*/
1604 case 10: /*MOVLTZ*/
1605 case 11: /*MOVGEZ*/
1606 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1607 {
1608 static const TCGCond cond[] = {
1609 TCG_COND_NE,
1610 TCG_COND_EQ,
1611 TCG_COND_GE,
1612 TCG_COND_LT
1613 };
1614 int label = gen_new_label();
1615 tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label);
1616 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1617 gen_set_label(label);
1618 }
1619 break;
1620
1621 case 12: /*MOVFp*/
1622 case 13: /*MOVTp*/
1623 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1624 gen_window_check2(dc, RRR_R, RRR_S);
1625 {
1626 int label = gen_new_label();
1627 TCGv_i32 tmp = tcg_temp_new_i32();
1628
1629 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
1630 tcg_gen_brcondi_i32(
1631 OP2 & 1 ? TCG_COND_EQ : TCG_COND_NE,
1632 tmp, 0, label);
1633 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1634 gen_set_label(label);
1635 tcg_temp_free(tmp);
1636 }
1637 break;
1638
1639 case 14: /*RUR*/
1640 gen_window_check1(dc, RRR_R);
1641 {
1642 int st = (RRR_S << 4) + RRR_T;
1643 if (uregnames[st]) {
1644 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
1645 } else {
1646 qemu_log("RUR %d not implemented, ", st);
1647 TBD();
1648 }
1649 }
1650 break;
1651
1652 case 15: /*WUR*/
1653 gen_window_check1(dc, RRR_T);
1654 {
1655 if (uregnames[RSR_SR]) {
1656 tcg_gen_mov_i32(cpu_UR[RSR_SR], cpu_R[RRR_T]);
1657 } else {
1658 qemu_log("WUR %d not implemented, ", RSR_SR);
1659 TBD();
1660 }
1661 }
1662 break;
1663
1664 }
1665 break;
1666
1667 case 4: /*EXTUI*/
1668 case 5:
1669 gen_window_check2(dc, RRR_R, RRR_T);
1670 {
1671 int shiftimm = RRR_S | (OP1 << 4);
1672 int maskimm = (1 << (OP2 + 1)) - 1;
1673
1674 TCGv_i32 tmp = tcg_temp_new_i32();
1675 tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm);
1676 tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm);
1677 tcg_temp_free(tmp);
1678 }
1679 break;
1680
1681 case 6: /*CUST0*/
1682 RESERVED();
1683 break;
1684
1685 case 7: /*CUST1*/
1686 RESERVED();
1687 break;
1688
1689 case 8: /*LSCXp*/
1690 HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
1691 TBD();
1692 break;
1693
1694 case 9: /*LSC4*/
1695 gen_window_check2(dc, RRR_S, RRR_T);
1696 switch (OP2) {
1697 case 0: /*L32E*/
1698 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1699 gen_check_privilege(dc);
1700 {
1701 TCGv_i32 addr = tcg_temp_new_i32();
1702 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1703 (0xffffffc0 | (RRR_R << 2)));
1704 tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring);
1705 tcg_temp_free(addr);
1706 }
1707 break;
1708
1709 case 4: /*S32E*/
1710 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1711 gen_check_privilege(dc);
1712 {
1713 TCGv_i32 addr = tcg_temp_new_i32();
1714 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1715 (0xffffffc0 | (RRR_R << 2)));
1716 tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring);
1717 tcg_temp_free(addr);
1718 }
1719 break;
1720
1721 default:
1722 RESERVED();
1723 break;
1724 }
1725 break;
1726
1727 case 10: /*FP0*/
1728 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1729 TBD();
1730 break;
1731
1732 case 11: /*FP1*/
1733 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1734 TBD();
1735 break;
1736
1737 default: /*reserved*/
1738 RESERVED();
1739 break;
1740 }
1741 break;
1742
1743 case 1: /*L32R*/
1744 gen_window_check1(dc, RRR_T);
1745 {
1746 TCGv_i32 tmp = tcg_const_i32(
1747 ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ?
1748 0 : ((dc->pc + 3) & ~3)) +
1749 (0xfffc0000 | (RI16_IMM16 << 2)));
1750
1751 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
1752 tcg_gen_add_i32(tmp, tmp, dc->litbase);
1753 }
1754 tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring);
1755 tcg_temp_free(tmp);
1756 }
1757 break;
1758
1759 case 2: /*LSAI*/
1760 #define gen_load_store(type, shift) do { \
1761 TCGv_i32 addr = tcg_temp_new_i32(); \
1762 gen_window_check2(dc, RRI8_S, RRI8_T); \
1763 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
1764 if (shift) { \
1765 gen_load_store_alignment(dc, shift, addr, false); \
1766 } \
1767 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
1768 tcg_temp_free(addr); \
1769 } while (0)
1770
1771 switch (RRI8_R) {
1772 case 0: /*L8UI*/
1773 gen_load_store(ld8u, 0);
1774 break;
1775
1776 case 1: /*L16UI*/
1777 gen_load_store(ld16u, 1);
1778 break;
1779
1780 case 2: /*L32I*/
1781 gen_load_store(ld32u, 2);
1782 break;
1783
1784 case 4: /*S8I*/
1785 gen_load_store(st8, 0);
1786 break;
1787
1788 case 5: /*S16I*/
1789 gen_load_store(st16, 1);
1790 break;
1791
1792 case 6: /*S32I*/
1793 gen_load_store(st32, 2);
1794 break;
1795
1796 case 7: /*CACHEc*/
1797 if (RRI8_T < 8) {
1798 HAS_OPTION(XTENSA_OPTION_DCACHE);
1799 }
1800
1801 switch (RRI8_T) {
1802 case 0: /*DPFRc*/
1803 break;
1804
1805 case 1: /*DPFWc*/
1806 break;
1807
1808 case 2: /*DPFROc*/
1809 break;
1810
1811 case 3: /*DPFWOc*/
1812 break;
1813
1814 case 4: /*DHWBc*/
1815 break;
1816
1817 case 5: /*DHWBIc*/
1818 break;
1819
1820 case 6: /*DHIc*/
1821 break;
1822
1823 case 7: /*DIIc*/
1824 break;
1825
1826 case 8: /*DCEc*/
1827 switch (OP1) {
1828 case 0: /*DPFLl*/
1829 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1830 break;
1831
1832 case 2: /*DHUl*/
1833 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1834 break;
1835
1836 case 3: /*DIUl*/
1837 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1838 break;
1839
1840 case 4: /*DIWBc*/
1841 HAS_OPTION(XTENSA_OPTION_DCACHE);
1842 break;
1843
1844 case 5: /*DIWBIc*/
1845 HAS_OPTION(XTENSA_OPTION_DCACHE);
1846 break;
1847
1848 default: /*reserved*/
1849 RESERVED();
1850 break;
1851
1852 }
1853 break;
1854
1855 case 12: /*IPFc*/
1856 HAS_OPTION(XTENSA_OPTION_ICACHE);
1857 break;
1858
1859 case 13: /*ICEc*/
1860 switch (OP1) {
1861 case 0: /*IPFLl*/
1862 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1863 break;
1864
1865 case 2: /*IHUl*/
1866 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1867 break;
1868
1869 case 3: /*IIUl*/
1870 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1871 break;
1872
1873 default: /*reserved*/
1874 RESERVED();
1875 break;
1876 }
1877 break;
1878
1879 case 14: /*IHIc*/
1880 HAS_OPTION(XTENSA_OPTION_ICACHE);
1881 break;
1882
1883 case 15: /*IIIc*/
1884 HAS_OPTION(XTENSA_OPTION_ICACHE);
1885 break;
1886
1887 default: /*reserved*/
1888 RESERVED();
1889 break;
1890 }
1891 break;
1892
1893 case 9: /*L16SI*/
1894 gen_load_store(ld16s, 1);
1895 break;
1896 #undef gen_load_store
1897
1898 case 10: /*MOVI*/
1899 gen_window_check1(dc, RRI8_T);
1900 tcg_gen_movi_i32(cpu_R[RRI8_T],
1901 RRI8_IMM8 | (RRI8_S << 8) |
1902 ((RRI8_S & 0x8) ? 0xfffff000 : 0));
1903 break;
1904
1905 #define gen_load_store_no_hw_align(type) do { \
1906 TCGv_i32 addr = tcg_temp_local_new_i32(); \
1907 gen_window_check2(dc, RRI8_S, RRI8_T); \
1908 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \
1909 gen_load_store_alignment(dc, 2, addr, true); \
1910 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
1911 tcg_temp_free(addr); \
1912 } while (0)
1913
1914 case 11: /*L32AIy*/
1915 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
1916 gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/
1917 break;
1918
1919 case 12: /*ADDI*/
1920 gen_window_check2(dc, RRI8_S, RRI8_T);
1921 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE);
1922 break;
1923
1924 case 13: /*ADDMI*/
1925 gen_window_check2(dc, RRI8_S, RRI8_T);
1926 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8);
1927 break;
1928
1929 case 14: /*S32C1Iy*/
1930 HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE);
1931 gen_window_check2(dc, RRI8_S, RRI8_T);
1932 {
1933 int label = gen_new_label();
1934 TCGv_i32 tmp = tcg_temp_local_new_i32();
1935 TCGv_i32 addr = tcg_temp_local_new_i32();
1936
1937 tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
1938 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
1939 gen_load_store_alignment(dc, 2, addr, true);
1940 tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring);
1941 tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
1942 cpu_SR[SCOMPARE1], label);
1943
1944 tcg_gen_qemu_st32(tmp, addr, dc->cring);
1945
1946 gen_set_label(label);
1947 tcg_temp_free(addr);
1948 tcg_temp_free(tmp);
1949 }
1950 break;
1951
1952 case 15: /*S32RIy*/
1953 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
1954 gen_load_store_no_hw_align(st32); /*TODO release?*/
1955 break;
1956 #undef gen_load_store_no_hw_align
1957
1958 default: /*reserved*/
1959 RESERVED();
1960 break;
1961 }
1962 break;
1963
1964 case 3: /*LSCIp*/
1965 HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
1966 TBD();
1967 break;
1968
1969 case 4: /*MAC16d*/
1970 HAS_OPTION(XTENSA_OPTION_MAC16);
1971 {
1972 enum {
1973 MAC16_UMUL = 0x0,
1974 MAC16_MUL = 0x4,
1975 MAC16_MULA = 0x8,
1976 MAC16_MULS = 0xc,
1977 MAC16_NONE = 0xf,
1978 } op = OP1 & 0xc;
1979 bool is_m1_sr = (OP2 & 0x3) == 2;
1980 bool is_m2_sr = (OP2 & 0xc) == 0;
1981 uint32_t ld_offset = 0;
1982
1983 if (OP2 > 9) {
1984 RESERVED();
1985 }
1986
1987 switch (OP2 & 2) {
1988 case 0: /*MACI?/MACC?*/
1989 is_m1_sr = true;
1990 ld_offset = (OP2 & 1) ? -4 : 4;
1991
1992 if (OP2 >= 8) { /*MACI/MACC*/
1993 if (OP1 == 0) { /*LDINC/LDDEC*/
1994 op = MAC16_NONE;
1995 } else {
1996 RESERVED();
1997 }
1998 } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/
1999 RESERVED();
2000 }
2001 break;
2002
2003 case 2: /*MACD?/MACA?*/
2004 if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/
2005 RESERVED();
2006 }
2007 break;
2008 }
2009
2010 if (op != MAC16_NONE) {
2011 if (!is_m1_sr) {
2012 gen_window_check1(dc, RRR_S);
2013 }
2014 if (!is_m2_sr) {
2015 gen_window_check1(dc, RRR_T);
2016 }
2017 }
2018
2019 {
2020 TCGv_i32 vaddr = tcg_temp_new_i32();
2021 TCGv_i32 mem32 = tcg_temp_new_i32();
2022
2023 if (ld_offset) {
2024 gen_window_check1(dc, RRR_S);
2025 tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset);
2026 gen_load_store_alignment(dc, 2, vaddr, false);
2027 tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
2028 }
2029 if (op != MAC16_NONE) {
2030 TCGv_i32 m1 = gen_mac16_m(
2031 is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S],
2032 OP1 & 1, op == MAC16_UMUL);
2033 TCGv_i32 m2 = gen_mac16_m(
2034 is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T],
2035 OP1 & 2, op == MAC16_UMUL);
2036
2037 if (op == MAC16_MUL || op == MAC16_UMUL) {
2038 tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
2039 if (op == MAC16_UMUL) {
2040 tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
2041 } else {
2042 tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
2043 }
2044 } else {
2045 TCGv_i32 res = tcg_temp_new_i32();
2046 TCGv_i64 res64 = tcg_temp_new_i64();
2047 TCGv_i64 tmp = tcg_temp_new_i64();
2048
2049 tcg_gen_mul_i32(res, m1, m2);
2050 tcg_gen_ext_i32_i64(res64, res);
2051 tcg_gen_concat_i32_i64(tmp,
2052 cpu_SR[ACCLO], cpu_SR[ACCHI]);
2053 if (op == MAC16_MULA) {
2054 tcg_gen_add_i64(tmp, tmp, res64);
2055 } else {
2056 tcg_gen_sub_i64(tmp, tmp, res64);
2057 }
2058 tcg_gen_trunc_i64_i32(cpu_SR[ACCLO], tmp);
2059 tcg_gen_shri_i64(tmp, tmp, 32);
2060 tcg_gen_trunc_i64_i32(cpu_SR[ACCHI], tmp);
2061 tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
2062
2063 tcg_temp_free(res);
2064 tcg_temp_free_i64(res64);
2065 tcg_temp_free_i64(tmp);
2066 }
2067 tcg_temp_free(m1);
2068 tcg_temp_free(m2);
2069 }
2070 if (ld_offset) {
2071 tcg_gen_mov_i32(cpu_R[RRR_S], vaddr);
2072 tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32);
2073 }
2074 tcg_temp_free(vaddr);
2075 tcg_temp_free(mem32);
2076 }
2077 }
2078 break;
2079
2080 case 5: /*CALLN*/
2081 switch (CALL_N) {
2082 case 0: /*CALL0*/
2083 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
2084 gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2085 break;
2086
2087 case 1: /*CALL4w*/
2088 case 2: /*CALL8w*/
2089 case 3: /*CALL12w*/
2090 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2091 gen_window_check1(dc, CALL_N << 2);
2092 gen_callwi(dc, CALL_N,
2093 (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2094 break;
2095 }
2096 break;
2097
2098 case 6: /*SI*/
2099 switch (CALL_N) {
2100 case 0: /*J*/
2101 gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0);
2102 break;
2103
2104 case 1: /*BZ*/
2105 gen_window_check1(dc, BRI12_S);
2106 {
2107 static const TCGCond cond[] = {
2108 TCG_COND_EQ, /*BEQZ*/
2109 TCG_COND_NE, /*BNEZ*/
2110 TCG_COND_LT, /*BLTZ*/
2111 TCG_COND_GE, /*BGEZ*/
2112 };
2113
2114 gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0,
2115 4 + BRI12_IMM12_SE);
2116 }
2117 break;
2118
2119 case 2: /*BI0*/
2120 gen_window_check1(dc, BRI8_S);
2121 {
2122 static const TCGCond cond[] = {
2123 TCG_COND_EQ, /*BEQI*/
2124 TCG_COND_NE, /*BNEI*/
2125 TCG_COND_LT, /*BLTI*/
2126 TCG_COND_GE, /*BGEI*/
2127 };
2128
2129 gen_brcondi(dc, cond[BRI8_M & 3],
2130 cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
2131 }
2132 break;
2133
2134 case 3: /*BI1*/
2135 switch (BRI8_M) {
2136 case 0: /*ENTRYw*/
2137 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2138 {
2139 TCGv_i32 pc = tcg_const_i32(dc->pc);
2140 TCGv_i32 s = tcg_const_i32(BRI12_S);
2141 TCGv_i32 imm = tcg_const_i32(BRI12_IMM12);
2142 gen_advance_ccount(dc);
2143 gen_helper_entry(pc, s, imm);
2144 tcg_temp_free(imm);
2145 tcg_temp_free(s);
2146 tcg_temp_free(pc);
2147 reset_used_window(dc);
2148 }
2149 break;
2150
2151 case 1: /*B1*/
2152 switch (BRI8_R) {
2153 case 0: /*BFp*/
2154 case 1: /*BTp*/
2155 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
2156 {
2157 TCGv_i32 tmp = tcg_temp_new_i32();
2158 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S);
2159 gen_brcondi(dc,
2160 BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ,
2161 tmp, 0, 4 + RRI8_IMM8_SE);
2162 tcg_temp_free(tmp);
2163 }
2164 break;
2165
2166 case 8: /*LOOP*/
2167 case 9: /*LOOPNEZ*/
2168 case 10: /*LOOPGTZ*/
2169 HAS_OPTION(XTENSA_OPTION_LOOP);
2170 gen_window_check1(dc, RRI8_S);
2171 {
2172 uint32_t lend = dc->pc + RRI8_IMM8 + 4;
2173 TCGv_i32 tmp = tcg_const_i32(lend);
2174
2175 tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1);
2176 tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc);
2177 gen_wsr_lend(dc, LEND, tmp);
2178 tcg_temp_free(tmp);
2179
2180 if (BRI8_R > 8) {
2181 int label = gen_new_label();
2182 tcg_gen_brcondi_i32(
2183 BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT,
2184 cpu_R[RRI8_S], 0, label);
2185 gen_jumpi(dc, lend, 1);
2186 gen_set_label(label);
2187 }
2188
2189 gen_jumpi(dc, dc->next_pc, 0);
2190 }
2191 break;
2192
2193 default: /*reserved*/
2194 RESERVED();
2195 break;
2196
2197 }
2198 break;
2199
2200 case 2: /*BLTUI*/
2201 case 3: /*BGEUI*/
2202 gen_window_check1(dc, BRI8_S);
2203 gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
2204 cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE);
2205 break;
2206 }
2207 break;
2208
2209 }
2210 break;
2211
2212 case 7: /*B*/
2213 {
2214 TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
2215
2216 switch (RRI8_R & 7) {
2217 case 0: /*BNONE*/ /*BANY*/
2218 gen_window_check2(dc, RRI8_S, RRI8_T);
2219 {
2220 TCGv_i32 tmp = tcg_temp_new_i32();
2221 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2222 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2223 tcg_temp_free(tmp);
2224 }
2225 break;
2226
2227 case 1: /*BEQ*/ /*BNE*/
2228 case 2: /*BLT*/ /*BGE*/
2229 case 3: /*BLTU*/ /*BGEU*/
2230 gen_window_check2(dc, RRI8_S, RRI8_T);
2231 {
2232 static const TCGCond cond[] = {
2233 [1] = TCG_COND_EQ,
2234 [2] = TCG_COND_LT,
2235 [3] = TCG_COND_LTU,
2236 [9] = TCG_COND_NE,
2237 [10] = TCG_COND_GE,
2238 [11] = TCG_COND_GEU,
2239 };
2240 gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T],
2241 4 + RRI8_IMM8_SE);
2242 }
2243 break;
2244
2245 case 4: /*BALL*/ /*BNALL*/
2246 gen_window_check2(dc, RRI8_S, RRI8_T);
2247 {
2248 TCGv_i32 tmp = tcg_temp_new_i32();
2249 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2250 gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T],
2251 4 + RRI8_IMM8_SE);
2252 tcg_temp_free(tmp);
2253 }
2254 break;
2255
2256 case 5: /*BBC*/ /*BBS*/
2257 gen_window_check2(dc, RRI8_S, RRI8_T);
2258 {
2259 TCGv_i32 bit = tcg_const_i32(1);
2260 TCGv_i32 tmp = tcg_temp_new_i32();
2261 tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
2262 tcg_gen_shl_i32(bit, bit, tmp);
2263 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
2264 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2265 tcg_temp_free(tmp);
2266 tcg_temp_free(bit);
2267 }
2268 break;
2269
2270 case 6: /*BBCI*/ /*BBSI*/
2271 case 7:
2272 gen_window_check1(dc, RRI8_S);
2273 {
2274 TCGv_i32 tmp = tcg_temp_new_i32();
2275 tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
2276 1 << (((RRI8_R & 1) << 4) | RRI8_T));
2277 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2278 tcg_temp_free(tmp);
2279 }
2280 break;
2281
2282 }
2283 }
2284 break;
2285
2286 #define gen_narrow_load_store(type) do { \
2287 TCGv_i32 addr = tcg_temp_new_i32(); \
2288 gen_window_check2(dc, RRRN_S, RRRN_T); \
2289 tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
2290 gen_load_store_alignment(dc, 2, addr, false); \
2291 tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \
2292 tcg_temp_free(addr); \
2293 } while (0)
2294
2295 case 8: /*L32I.Nn*/
2296 gen_narrow_load_store(ld32u);
2297 break;
2298
2299 case 9: /*S32I.Nn*/
2300 gen_narrow_load_store(st32);
2301 break;
2302 #undef gen_narrow_load_store
2303
2304 case 10: /*ADD.Nn*/
2305 gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T);
2306 tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]);
2307 break;
2308
2309 case 11: /*ADDI.Nn*/
2310 gen_window_check2(dc, RRRN_R, RRRN_S);
2311 tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1);
2312 break;
2313
2314 case 12: /*ST2n*/
2315 gen_window_check1(dc, RRRN_S);
2316 if (RRRN_T < 8) { /*MOVI.Nn*/
2317 tcg_gen_movi_i32(cpu_R[RRRN_S],
2318 RRRN_R | (RRRN_T << 4) |
2319 ((RRRN_T & 6) == 6 ? 0xffffff80 : 0));
2320 } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/
2321 TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
2322
2323 gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
2324 4 + (RRRN_R | ((RRRN_T & 3) << 4)));
2325 }
2326 break;
2327
2328 case 13: /*ST3n*/
2329 switch (RRRN_R) {
2330 case 0: /*MOV.Nn*/
2331 gen_window_check2(dc, RRRN_S, RRRN_T);
2332 tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]);
2333 break;
2334
2335 case 15: /*S3*/
2336 switch (RRRN_T) {
2337 case 0: /*RET.Nn*/
2338 gen_jump(dc, cpu_R[0]);
2339 break;
2340
2341 case 1: /*RETW.Nn*/
2342 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2343 {
2344 TCGv_i32 tmp = tcg_const_i32(dc->pc);
2345 gen_advance_ccount(dc);
2346 gen_helper_retw(tmp, tmp);
2347 gen_jump(dc, tmp);
2348 tcg_temp_free(tmp);
2349 }
2350 break;
2351
2352 case 2: /*BREAK.Nn*/
2353 TBD();
2354 break;
2355
2356 case 3: /*NOP.Nn*/
2357 break;
2358
2359 case 6: /*ILL.Nn*/
2360 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2361 break;
2362
2363 default: /*reserved*/
2364 RESERVED();
2365 break;
2366 }
2367 break;
2368
2369 default: /*reserved*/
2370 RESERVED();
2371 break;
2372 }
2373 break;
2374
2375 default: /*reserved*/
2376 RESERVED();
2377 break;
2378 }
2379
2380 gen_check_loop_end(dc, 0);
2381 dc->pc = dc->next_pc;
2382
2383 return;
2384
2385 invalid_opcode:
2386 qemu_log("INVALID(pc = %08x)\n", dc->pc);
2387 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2388 #undef HAS_OPTION
2389 }
2390
2391 static void check_breakpoint(CPUState *env, DisasContext *dc)
2392 {
2393 CPUBreakpoint *bp;
2394
2395 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
2396 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2397 if (bp->pc == dc->pc) {
2398 tcg_gen_movi_i32(cpu_pc, dc->pc);
2399 gen_exception(dc, EXCP_DEBUG);
2400 dc->is_jmp = DISAS_UPDATE;
2401 }
2402 }
2403 }
2404 }
2405
/*
 * Translate guest code starting at tb->pc into a TCG opcode stream.
 * When search_pc is non-zero, also fill the gen_opc_* side tables that
 * map generated-opcode indices back to guest PCs and icounts (used to
 * restore CPU state after an exception in the middle of a TB).
 */
static void gen_intermediate_code_internal(
        CPUState *env, TranslationBlock *tb, int search_pc)
{
    DisasContext dc;
    int insn_count = 0;
    int j, lj = -1;
    uint16_t *gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    int max_insns = tb->cflags & CF_COUNT_MASK;
    uint32_t pc_start = tb->pc;
    /* translation never crosses a guest page boundary (loop guard below) */
    uint32_t next_page_start =
        (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    dc.config = env->config;
    dc.singlestep_enabled = env->singlestep_enabled;
    dc.tb = tb;
    dc.pc = pc_start;
    dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
    /* cring: ring used for data accesses; forced to 0 while EXCM is set */
    dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
    dc.lbeg = env->sregs[LBEG];
    dc.lend = env->sregs[LEND];
    dc.is_jmp = DISAS_NEXT;
    dc.ccount_delta = 0;

    init_litbase(&dc);
    init_sar_tracker(&dc);
    reset_used_window(&dc);

    gen_icount_start();

    /* When single-stepping and an exception was just taken, report
       EXCP_DEBUG at the handler entry before translating anything */
    if (env->singlestep_enabled && env->exception_taken) {
        env->exception_taken = 0;
        tcg_gen_movi_i32(cpu_pc, dc.pc);
        gen_exception(&dc, EXCP_DEBUG);
    }

    do {
        check_breakpoint(env, &dc);

        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                /* zero-fill slots emitted since the previous insn start */
                while (lj < j) {
                    gen_opc_instr_start[lj++] = 0;
                }
            }
            gen_opc_pc[lj] = dc.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = insn_count;
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        ++dc.ccount_delta;

        /* last insn of an icount TB that does I/O must run with io flag */
        if (insn_count + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_xtensa_insn(&dc);
        ++insn_count;
        if (env->singlestep_enabled) {
            tcg_gen_movi_i32(cpu_pc, dc.pc);
            gen_exception(&dc, EXCP_DEBUG);
            break;
        }
    } while (dc.is_jmp == DISAS_NEXT &&
            insn_count < max_insns &&
            dc.pc < next_page_start &&
            gen_opc_ptr < gen_opc_end);

    reset_litbase(&dc);
    reset_sar_tracker(&dc);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* TB ended without an explicit branch: chain to the next insn */
    if (dc.is_jmp == DISAS_NEXT) {
        gen_jumpi(&dc, dc.pc, 0);
    }
    gen_icount_end(tb, insn_count);
    *gen_opc_ptr = INDEX_op_end;

    if (!search_pc) {
        tb->size = dc.pc - pc_start;
        tb->icount = insn_count;
    }
}
2501
/* Translate a TB without building the PC-search side tables. */
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
2506
/* Translate a TB and fill the gen_opc_* tables (search_pc = 1) so that
   guest state can be restored from a host PC within the TB. */
void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
2511
2512 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
2513 int flags)
2514 {
2515 int i, j;
2516
2517 cpu_fprintf(f, "PC=%08x\n\n", env->pc);
2518
2519 for (i = j = 0; i < 256; ++i) {
2520 if (sregnames[i]) {
2521 cpu_fprintf(f, "%s=%08x%c", sregnames[i], env->sregs[i],
2522 (j++ % 4) == 3 ? '\n' : ' ');
2523 }
2524 }
2525
2526 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
2527
2528 for (i = j = 0; i < 256; ++i) {
2529 if (uregnames[i]) {
2530 cpu_fprintf(f, "%s=%08x%c", uregnames[i], env->uregs[i],
2531 (j++ % 4) == 3 ? '\n' : ' ');
2532 }
2533 }
2534
2535 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
2536
2537 for (i = 0; i < 16; ++i) {
2538 cpu_fprintf(f, "A%02d=%08x%c", i, env->regs[i],
2539 (i % 4) == 3 ? '\n' : ' ');
2540 }
2541
2542 cpu_fprintf(f, "\n");
2543
2544 for (i = 0; i < env->config->nareg; ++i) {
2545 cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i],
2546 (i % 4) == 3 ? '\n' : ' ');
2547 }
2548 }
2549
/* Restore the guest PC from the side table filled by
   gen_intermediate_code_pc(); pc_pos indexes gen_opc_pc. */
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}