1 /*
2 * Xtensa ISA:
3 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
4 *
5 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of the Open Source and Linux Lab nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <stdio.h>
32
33 #include "cpu.h"
34 #include "exec-all.h"
35 #include "disas.h"
36 #include "tcg-op.h"
37 #include "qemu-log.h"
38 #include "sysemu.h"
39
40 #include "helpers.h"
41 #define GEN_HELPER 1
42 #include "helpers.h"
43
44 typedef struct DisasContext {
45 const XtensaConfig *config;
46 TranslationBlock *tb;
47 uint32_t pc;
48 uint32_t next_pc;
49 int cring;
50 int ring;
51 uint32_t lbeg;
52 uint32_t lend;
53 TCGv_i32 litbase;
54 int is_jmp;
55 int singlestep_enabled;
56
57 bool sar_5bit;
58 bool sar_m32_5bit;
59 bool sar_m32_allocated;
60 TCGv_i32 sar_m32;
61
62 uint32_t ccount_delta;
63 unsigned used_window;
64
65 bool debug;
66 bool icount;
67 TCGv_i32 next_icount;
68 } DisasContext;
69
70 static TCGv_ptr cpu_env;
71 static TCGv_i32 cpu_pc;
72 static TCGv_i32 cpu_R[16];
73 static TCGv_i32 cpu_SR[256];
74 static TCGv_i32 cpu_UR[256];
75
76 #include "gen-icount.h"
77
78 static const char * const sregnames[256] = {
79 [LBEG] = "LBEG",
80 [LEND] = "LEND",
81 [LCOUNT] = "LCOUNT",
82 [SAR] = "SAR",
83 [BR] = "BR",
84 [LITBASE] = "LITBASE",
85 [SCOMPARE1] = "SCOMPARE1",
86 [ACCLO] = "ACCLO",
87 [ACCHI] = "ACCHI",
88 [MR] = "MR0",
89 [MR + 1] = "MR1",
90 [MR + 2] = "MR2",
91 [MR + 3] = "MR3",
92 [WINDOW_BASE] = "WINDOW_BASE",
93 [WINDOW_START] = "WINDOW_START",
94 [PTEVADDR] = "PTEVADDR",
95 [RASID] = "RASID",
96 [ITLBCFG] = "ITLBCFG",
97 [DTLBCFG] = "DTLBCFG",
98 [IBREAKENABLE] = "IBREAKENABLE",
99 [IBREAKA] = "IBREAKA0",
100 [IBREAKA + 1] = "IBREAKA1",
101 [DBREAKA] = "DBREAKA0",
102 [DBREAKA + 1] = "DBREAKA1",
103 [DBREAKC] = "DBREAKC0",
104 [DBREAKC + 1] = "DBREAKC1",
105 [EPC1] = "EPC1",
106 [EPC1 + 1] = "EPC2",
107 [EPC1 + 2] = "EPC3",
108 [EPC1 + 3] = "EPC4",
109 [EPC1 + 4] = "EPC5",
110 [EPC1 + 5] = "EPC6",
111 [EPC1 + 6] = "EPC7",
112 [DEPC] = "DEPC",
113 [EPS2] = "EPS2",
114 [EPS2 + 1] = "EPS3",
115 [EPS2 + 2] = "EPS4",
116 [EPS2 + 3] = "EPS5",
117 [EPS2 + 4] = "EPS6",
118 [EPS2 + 5] = "EPS7",
119 [EXCSAVE1] = "EXCSAVE1",
120 [EXCSAVE1 + 1] = "EXCSAVE2",
121 [EXCSAVE1 + 2] = "EXCSAVE3",
122 [EXCSAVE1 + 3] = "EXCSAVE4",
123 [EXCSAVE1 + 4] = "EXCSAVE5",
124 [EXCSAVE1 + 5] = "EXCSAVE6",
125 [EXCSAVE1 + 6] = "EXCSAVE7",
126 [CPENABLE] = "CPENABLE",
127 [INTSET] = "INTSET",
128 [INTCLEAR] = "INTCLEAR",
129 [INTENABLE] = "INTENABLE",
130 [PS] = "PS",
131 [VECBASE] = "VECBASE",
132 [EXCCAUSE] = "EXCCAUSE",
133 [DEBUGCAUSE] = "DEBUGCAUSE",
134 [CCOUNT] = "CCOUNT",
135 [PRID] = "PRID",
136 [ICOUNT] = "ICOUNT",
137 [ICOUNTLEVEL] = "ICOUNTLEVEL",
138 [EXCVADDR] = "EXCVADDR",
139 [CCOMPARE] = "CCOMPARE0",
140 [CCOMPARE + 1] = "CCOMPARE1",
141 [CCOMPARE + 2] = "CCOMPARE2",
142 };
143
144 static const char * const uregnames[256] = {
145 [THREADPTR] = "THREADPTR",
146 [FCR] = "FCR",
147 [FSR] = "FSR",
148 };
149
150 void xtensa_translate_init(void)
151 {
152 static const char * const regnames[] = {
153 "ar0", "ar1", "ar2", "ar3",
154 "ar4", "ar5", "ar6", "ar7",
155 "ar8", "ar9", "ar10", "ar11",
156 "ar12", "ar13", "ar14", "ar15",
157 };
158 int i;
159
160 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
161 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
162 offsetof(CPUXtensaState, pc), "pc");
163
164 for (i = 0; i < 16; i++) {
165 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
166 offsetof(CPUXtensaState, regs[i]),
167 regnames[i]);
168 }
169
170 for (i = 0; i < 256; ++i) {
171 if (sregnames[i]) {
172 cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
173 offsetof(CPUXtensaState, sregs[i]),
174 sregnames[i]);
175 }
176 }
177
178 for (i = 0; i < 256; ++i) {
179 if (uregnames[i]) {
180 cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
181 offsetof(CPUXtensaState, uregs[i]),
182 uregnames[i]);
183 }
184 }
185 #define GEN_HELPER 2
186 #include "helpers.h"
187 }
188
189 static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
190 {
191 return xtensa_option_bits_enabled(dc->config, opt);
192 }
193
194 static inline bool option_enabled(DisasContext *dc, int opt)
195 {
196 return xtensa_option_enabled(dc->config, opt);
197 }
198
199 static void init_litbase(DisasContext *dc)
200 {
201 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
202 dc->litbase = tcg_temp_local_new_i32();
203 tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000);
204 }
205 }
206
207 static void reset_litbase(DisasContext *dc)
208 {
209 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
210 tcg_temp_free(dc->litbase);
211 }
212 }
213
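/*
 * SAR tracking (a descriptive note on the code below): the hardware funnel
 * shift uses a 6-bit SAR, but the common SRL/SRA/SLL cases only need a
 * 5-bit amount.  sar_5bit is set when SAR is known to hold a value in
 * 0..31 (SSR/SSA8L/SSAI), so the 32-bit TCG shift ops can be used
 * directly.  sar_m32_5bit means SAR holds 32 - n with n kept in sar_m32,
 * which lets SLL shift left by n without 64-bit arithmetic.
 */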
214 static void init_sar_tracker(DisasContext *dc)
215 {
216 dc->sar_5bit = false;
217 dc->sar_m32_5bit = false;
218 dc->sar_m32_allocated = false;
219 }
220
221 static void reset_sar_tracker(DisasContext *dc)
222 {
223 if (dc->sar_m32_allocated) {
224 tcg_temp_free(dc->sar_m32);
225 }
226 }
227
228 static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
229 {
230 tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
231 if (dc->sar_m32_5bit) {
232 tcg_gen_discard_i32(dc->sar_m32);
233 }
234 dc->sar_5bit = true;
235 dc->sar_m32_5bit = false;
236 }
237
238 static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
239 {
240 TCGv_i32 tmp = tcg_const_i32(32);
241 if (!dc->sar_m32_allocated) {
242 dc->sar_m32 = tcg_temp_local_new_i32();
243 dc->sar_m32_allocated = true;
244 }
245 tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
246 tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
247 dc->sar_5bit = false;
248 dc->sar_m32_5bit = true;
249 tcg_temp_free(tmp);
250 }
251
252 static void gen_advance_ccount(DisasContext *dc)
253 {
254 if (dc->ccount_delta > 0) {
255 TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta);
256 dc->ccount_delta = 0;
257 gen_helper_advance_ccount(tmp);
258 tcg_temp_free(tmp);
259 }
260 }
261
262 static void reset_used_window(DisasContext *dc)
263 {
264 dc->used_window = 0;
265 }
266
267 static void gen_exception(DisasContext *dc, int excp)
268 {
269 TCGv_i32 tmp = tcg_const_i32(excp);
270 gen_advance_ccount(dc);
271 gen_helper_exception(tmp);
272 tcg_temp_free(tmp);
273 }
274
275 static void gen_exception_cause(DisasContext *dc, uint32_t cause)
276 {
277 TCGv_i32 tpc = tcg_const_i32(dc->pc);
278 TCGv_i32 tcause = tcg_const_i32(cause);
279 gen_advance_ccount(dc);
280 gen_helper_exception_cause(tpc, tcause);
281 tcg_temp_free(tpc);
282 tcg_temp_free(tcause);
283 if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
284 cause == SYSCALL_CAUSE) {
285 dc->is_jmp = DISAS_UPDATE;
286 }
287 }
288
289 static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
290 TCGv_i32 vaddr)
291 {
292 TCGv_i32 tpc = tcg_const_i32(dc->pc);
293 TCGv_i32 tcause = tcg_const_i32(cause);
294 gen_advance_ccount(dc);
295 gen_helper_exception_cause_vaddr(tpc, tcause, vaddr);
296 tcg_temp_free(tpc);
297 tcg_temp_free(tcause);
298 }
299
300 static void gen_debug_exception(DisasContext *dc, uint32_t cause)
301 {
302 TCGv_i32 tpc = tcg_const_i32(dc->pc);
303 TCGv_i32 tcause = tcg_const_i32(cause);
304 gen_advance_ccount(dc);
305 gen_helper_debug_exception(tpc, tcause);
306 tcg_temp_free(tpc);
307 tcg_temp_free(tcause);
308 if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
309 dc->is_jmp = DISAS_UPDATE;
310 }
311 }
312
313 static void gen_check_privilege(DisasContext *dc)
314 {
315 if (dc->cring) {
316 gen_exception_cause(dc, PRIVILEGED_CAUSE);
317 dc->is_jmp = DISAS_UPDATE;
318 }
319 }
320
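/*
 * gen_jump_slot transfers control to 'dest'.  When 'slot' is 0 or 1 the
 * target is known at translation time and lies on the same page, so the TB
 * is chained with goto_tb/exit_tb(tb + slot); slot == -1 falls back to a
 * plain exit_tb(0).  With single-stepping enabled EXCP_DEBUG is raised
 * instead of exiting normally.
 */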
321 static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
322 {
323 tcg_gen_mov_i32(cpu_pc, dest);
324 gen_advance_ccount(dc);
325 if (dc->icount) {
326 tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
327 }
328 if (dc->singlestep_enabled) {
329 gen_exception(dc, EXCP_DEBUG);
330 } else {
331 if (slot >= 0) {
332 tcg_gen_goto_tb(slot);
333 tcg_gen_exit_tb((tcg_target_long)dc->tb + slot);
334 } else {
335 tcg_gen_exit_tb(0);
336 }
337 }
338 dc->is_jmp = DISAS_UPDATE;
339 }
340
341 static void gen_jump(DisasContext *dc, TCGv dest)
342 {
343 gen_jump_slot(dc, dest, -1);
344 }
345
346 static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
347 {
348 TCGv_i32 tmp = tcg_const_i32(dest);
349 if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
350 slot = -1;
351 }
352 gen_jump_slot(dc, tmp, slot);
353 tcg_temp_free(tmp);
354 }
355
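/*
 * Windowed call: CALLINC is latched into PS, and the return address is
 * written to a(CALLINC * 4) with the window increment encoded in its two
 * top bits, e.g. CALL8 stores (2 << 30) | (next_pc & 0x3fffffff) into a8.
 */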
356 static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
357 int slot)
358 {
359 TCGv_i32 tcallinc = tcg_const_i32(callinc);
360
361 tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
362 tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
363 tcg_temp_free(tcallinc);
364 tcg_gen_movi_i32(cpu_R[callinc << 2],
365 (callinc << 30) | (dc->next_pc & 0x3fffffff));
366 gen_jump_slot(dc, dest, slot);
367 }
368
369 static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
370 {
371 gen_callw_slot(dc, callinc, dest, -1);
372 }
373
374 static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
375 {
376 TCGv_i32 tmp = tcg_const_i32(dest);
377 if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
378 slot = -1;
379 }
380 gen_callw_slot(dc, callinc, tmp, slot);
381 tcg_temp_free(tmp);
382 }
383
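/*
 * Zero-overhead loop: when the instruction that just finished ends exactly
 * at LEND (and PS.EXCM is clear), LCOUNT is decremented and control jumps
 * back to LBEG, unless LCOUNT is already zero, in which case execution
 * falls through to next_pc.
 */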
384 static bool gen_check_loop_end(DisasContext *dc, int slot)
385 {
386 if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
387 !(dc->tb->flags & XTENSA_TBFLAG_EXCM) &&
388 dc->next_pc == dc->lend) {
389 int label = gen_new_label();
390
391 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
392 tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
393 gen_jumpi(dc, dc->lbeg, slot);
394 gen_set_label(label);
395 gen_jumpi(dc, dc->next_pc, -1);
396 return true;
397 }
398 return false;
399 }
400
401 static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
402 {
403 if (!gen_check_loop_end(dc, slot)) {
404 gen_jumpi(dc, dc->next_pc, slot);
405 }
406 }
407
408 static void gen_brcond(DisasContext *dc, TCGCond cond,
409 TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
410 {
411 int label = gen_new_label();
412
413 tcg_gen_brcond_i32(cond, t0, t1, label);
414 gen_jumpi_check_loop_end(dc, 0);
415 gen_set_label(label);
416 gen_jumpi(dc, dc->pc + offset, 1);
417 }
418
419 static void gen_brcondi(DisasContext *dc, TCGCond cond,
420 TCGv_i32 t0, uint32_t t1, uint32_t offset)
421 {
422 TCGv_i32 tmp = tcg_const_i32(t1);
423 gen_brcond(dc, cond, t0, tmp, offset);
424 tcg_temp_free(tmp);
425 }
426
427 static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
428 {
429 gen_advance_ccount(dc);
430 tcg_gen_mov_i32(d, cpu_SR[sr]);
431 }
432
433 static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
434 {
435 tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
436 tcg_gen_or_i32(d, d, cpu_SR[sr]);
437 tcg_gen_andi_i32(d, d, 0xfffffffc);
438 }
439
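/*
 * RSR dispatch: most special registers are read straight from cpu_SR[];
 * only CCOUNT (which needs the pending ccount delta applied first) and
 * PTEVADDR (which combines EXCVADDR bits with the page table base) get
 * dedicated handlers.
 */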
440 static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
441 {
442 static void (* const rsr_handler[256])(DisasContext *dc,
443 TCGv_i32 d, uint32_t sr) = {
444 [CCOUNT] = gen_rsr_ccount,
445 [PTEVADDR] = gen_rsr_ptevaddr,
446 };
447
448 if (sregnames[sr]) {
449 if (rsr_handler[sr]) {
450 rsr_handler[sr](dc, d, sr);
451 } else {
452 tcg_gen_mov_i32(d, cpu_SR[sr]);
453 }
454 } else {
455 qemu_log("RSR %d not implemented, ", sr);
456 }
457 }
458
459 static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
460 {
461 gen_helper_wsr_lbeg(s);
462 }
463
464 static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
465 {
466 gen_helper_wsr_lend(s);
467 }
468
469 static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
470 {
471 tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
472 if (dc->sar_m32_5bit) {
473 tcg_gen_discard_i32(dc->sar_m32);
474 }
475 dc->sar_5bit = false;
476 dc->sar_m32_5bit = false;
477 }
478
479 static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
480 {
481 tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
482 }
483
484 static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
485 {
486 tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
487 /* This can change tb->flags, so exit tb */
488 gen_jumpi_check_loop_end(dc, -1);
489 }
490
491 static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
492 {
493 tcg_gen_ext8s_i32(cpu_SR[sr], s);
494 }
495
496 static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
497 {
498 gen_helper_wsr_windowbase(v);
499 reset_used_window(dc);
500 }
501
502 static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
503 {
504 tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
505 reset_used_window(dc);
506 }
507
508 static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
509 {
510 tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
511 }
512
513 static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
514 {
515 gen_helper_wsr_rasid(v);
516 /* This can change tb->flags, so exit tb */
517 gen_jumpi_check_loop_end(dc, -1);
518 }
519
520 static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
521 {
522 tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
523 }
524
525 static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
526 {
527 gen_helper_wsr_ibreakenable(v);
528 gen_jumpi_check_loop_end(dc, 0);
529 }
530
531 static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
532 {
533 unsigned id = sr - IBREAKA;
534
535 if (id < dc->config->nibreak) {
536 TCGv_i32 tmp = tcg_const_i32(id);
537 gen_helper_wsr_ibreaka(tmp, v);
538 tcg_temp_free(tmp);
539 gen_jumpi_check_loop_end(dc, 0);
540 }
541 }
542
543 static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
544 {
545 unsigned id = sr - DBREAKA;
546
547 if (id < dc->config->ndbreak) {
548 TCGv_i32 tmp = tcg_const_i32(id);
549 gen_helper_wsr_dbreaka(tmp, v);
550 tcg_temp_free(tmp);
551 }
552 }
553
554 static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v)
555 {
556 unsigned id = sr - DBREAKC;
557
558 if (id < dc->config->ndbreak) {
559 TCGv_i32 tmp = tcg_const_i32(id);
560 gen_helper_wsr_dbreakc(tmp, v);
561 tcg_temp_free(tmp);
562 }
563 }
564
565 static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
566 {
567 tcg_gen_andi_i32(cpu_SR[sr], v,
568 dc->config->inttype_mask[INTTYPE_SOFTWARE]);
569 gen_helper_check_interrupts(cpu_env);
570 gen_jumpi_check_loop_end(dc, 0);
571 }
572
573 static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
574 {
575 TCGv_i32 tmp = tcg_temp_new_i32();
576
577 tcg_gen_andi_i32(tmp, v,
578 dc->config->inttype_mask[INTTYPE_EDGE] |
579 dc->config->inttype_mask[INTTYPE_NMI] |
580 dc->config->inttype_mask[INTTYPE_SOFTWARE]);
581 tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp);
582 tcg_temp_free(tmp);
583 gen_helper_check_interrupts(cpu_env);
584 }
585
586 static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
587 {
588 tcg_gen_mov_i32(cpu_SR[sr], v);
589 gen_helper_check_interrupts(cpu_env);
590 gen_jumpi_check_loop_end(dc, 0);
591 }
592
593 static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
594 {
595 uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
596 PS_UM | PS_EXCM | PS_INTLEVEL;
597
598 if (option_enabled(dc, XTENSA_OPTION_MMU)) {
599 mask |= PS_RING;
600 }
601 tcg_gen_andi_i32(cpu_SR[sr], v, mask);
602 reset_used_window(dc);
603 gen_helper_check_interrupts(cpu_env);
604 /* This can change mmu index and tb->flags, so exit tb */
605 gen_jumpi_check_loop_end(dc, -1);
606 }
607
608 static void gen_wsr_debugcause(DisasContext *dc, uint32_t sr, TCGv_i32 v)
609 {
610 }
611
612 static void gen_wsr_prid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
613 {
614 }
615
616 static void gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
617 {
618 if (dc->icount) {
619 tcg_gen_mov_i32(dc->next_icount, v);
620 } else {
621 tcg_gen_mov_i32(cpu_SR[sr], v);
622 }
623 }
624
625 static void gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v)
626 {
627 tcg_gen_andi_i32(cpu_SR[sr], v, 0xf);
628 /* This can change tb->flags, so exit tb */
629 gen_jumpi_check_loop_end(dc, -1);
630 }
631
632 static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
633 {
634 uint32_t id = sr - CCOMPARE;
635 if (id < dc->config->nccompare) {
636 uint32_t int_bit = 1 << dc->config->timerint[id];
637 gen_advance_ccount(dc);
638 tcg_gen_mov_i32(cpu_SR[sr], v);
639 tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
640 gen_helper_check_interrupts(cpu_env);
641 }
642 }
643
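/*
 * WSR dispatch: registers with side effects get dedicated handlers above.
 * Writes that may change tb->flags, the MMU index or the memory mapping
 * (LITBASE, RASID, PS, ICOUNTLEVEL, ...) end the current TB so that the
 * next one is translated with the new state.
 */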
644 static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
645 {
646 static void (* const wsr_handler[256])(DisasContext *dc,
647 uint32_t sr, TCGv_i32 v) = {
648 [LBEG] = gen_wsr_lbeg,
649 [LEND] = gen_wsr_lend,
650 [SAR] = gen_wsr_sar,
651 [BR] = gen_wsr_br,
652 [LITBASE] = gen_wsr_litbase,
653 [ACCHI] = gen_wsr_acchi,
654 [WINDOW_BASE] = gen_wsr_windowbase,
655 [WINDOW_START] = gen_wsr_windowstart,
656 [PTEVADDR] = gen_wsr_ptevaddr,
657 [RASID] = gen_wsr_rasid,
658 [ITLBCFG] = gen_wsr_tlbcfg,
659 [DTLBCFG] = gen_wsr_tlbcfg,
660 [IBREAKENABLE] = gen_wsr_ibreakenable,
661 [IBREAKA] = gen_wsr_ibreaka,
662 [IBREAKA + 1] = gen_wsr_ibreaka,
663 [DBREAKA] = gen_wsr_dbreaka,
664 [DBREAKA + 1] = gen_wsr_dbreaka,
665 [DBREAKC] = gen_wsr_dbreakc,
666 [DBREAKC + 1] = gen_wsr_dbreakc,
667 [INTSET] = gen_wsr_intset,
668 [INTCLEAR] = gen_wsr_intclear,
669 [INTENABLE] = gen_wsr_intenable,
670 [PS] = gen_wsr_ps,
671 [DEBUGCAUSE] = gen_wsr_debugcause,
672 [PRID] = gen_wsr_prid,
673 [ICOUNT] = gen_wsr_icount,
674 [ICOUNTLEVEL] = gen_wsr_icountlevel,
675 [CCOMPARE] = gen_wsr_ccompare,
676 [CCOMPARE + 1] = gen_wsr_ccompare,
677 [CCOMPARE + 2] = gen_wsr_ccompare,
678 };
679
680 if (sregnames[sr]) {
681 if (wsr_handler[sr]) {
682 wsr_handler[sr](dc, sr, s);
683 } else {
684 tcg_gen_mov_i32(cpu_SR[sr], s);
685 }
686 } else {
687 qemu_log("WSR %d not implemented, ", sr);
688 }
689 }
690
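/*
 * Alignment handling: on cores without the unaligned-exception option the
 * low address bits are simply masked off.  Otherwise, when hardware
 * alignment exists but this particular access may not use it
 * (no_hw_alignment, e.g. the atomic and MP-synchronization ops), an
 * explicit check raises LOAD_STORE_ALIGNMENT_CAUSE with the faulting
 * address.
 */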
691 static void gen_load_store_alignment(DisasContext *dc, int shift,
692 TCGv_i32 addr, bool no_hw_alignment)
693 {
694 if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
695 tcg_gen_andi_i32(addr, addr, ~0 << shift);
696 } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
697 no_hw_alignment) {
698 int label = gen_new_label();
699 TCGv_i32 tmp = tcg_temp_new_i32();
700 tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
701 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
702 gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
703 gen_set_label(label);
704 tcg_temp_free(tmp);
705 }
706 }
707
708 static void gen_waiti(DisasContext *dc, uint32_t imm4)
709 {
710 TCGv_i32 pc = tcg_const_i32(dc->next_pc);
711 TCGv_i32 intlevel = tcg_const_i32(imm4);
712 gen_advance_ccount(dc);
713 gen_helper_waiti(pc, intlevel);
714 tcg_temp_free(pc);
715 tcg_temp_free(intlevel);
716 }
717
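/*
 * Window overflow check: used_window caches the highest AR quad already
 * checked in this TB, so gen_helper_window_check is only emitted when an
 * instruction touches a higher quad (and never while PS.EXCM is set in
 * tb->flags).  Anything that can move the window resets the cache via
 * reset_used_window.
 */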
718 static void gen_window_check1(DisasContext *dc, unsigned r1)
719 {
720 if (dc->tb->flags & XTENSA_TBFLAG_EXCM) {
721 return;
722 }
723 if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) &&
724 r1 / 4 > dc->used_window) {
725 TCGv_i32 pc = tcg_const_i32(dc->pc);
726 TCGv_i32 w = tcg_const_i32(r1 / 4);
727
728 dc->used_window = r1 / 4;
729 gen_advance_ccount(dc);
730 gen_helper_window_check(pc, w);
731
732 tcg_temp_free(w);
733 tcg_temp_free(pc);
734 }
735 }
736
737 static void gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2)
738 {
739 gen_window_check1(dc, r1 > r2 ? r1 : r2);
740 }
741
742 static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
743 unsigned r3)
744 {
745 gen_window_check2(dc, r1, r2 > r3 ? r2 : r3);
746 }
747
748 static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
749 {
750 TCGv_i32 m = tcg_temp_new_i32();
751
752 if (hi) {
753 (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
754 } else {
755 (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
756 }
757 return m;
758 }
759
760 static void disas_xtensa_insn(DisasContext *dc)
761 {
762 #define HAS_OPTION_BITS(opt) do { \
763 if (!option_bits_enabled(dc, opt)) { \
764 qemu_log("Option is not enabled %s:%d\n", \
765 __FILE__, __LINE__); \
766 goto invalid_opcode; \
767 } \
768 } while (0)
769
770 #define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
771
772 #define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
773 #define RESERVED() do { \
774 qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
775 dc->pc, b0, b1, b2, __FILE__, __LINE__); \
776 goto invalid_opcode; \
777 } while (0)
778
779
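/*
 * Instruction field extraction: b0..b2 are the raw instruction bytes.  The
 * nibble layout of the RRR/RRI8/RI16/CALL/BRI formats is mirrored between
 * big- and little-endian cores, hence the two sets of definitions below.
 */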
780 #ifdef TARGET_WORDS_BIGENDIAN
781 #define OP0 (((b0) & 0xf0) >> 4)
782 #define OP1 (((b2) & 0xf0) >> 4)
783 #define OP2 ((b2) & 0xf)
784 #define RRR_R ((b1) & 0xf)
785 #define RRR_S (((b1) & 0xf0) >> 4)
786 #define RRR_T ((b0) & 0xf)
787 #else
788 #define OP0 (((b0) & 0xf))
789 #define OP1 (((b2) & 0xf))
790 #define OP2 (((b2) & 0xf0) >> 4)
791 #define RRR_R (((b1) & 0xf0) >> 4)
792 #define RRR_S (((b1) & 0xf))
793 #define RRR_T (((b0) & 0xf0) >> 4)
794 #endif
795 #define RRR_X ((RRR_R & 0x4) >> 2)
796 #define RRR_Y ((RRR_T & 0x4) >> 2)
797 #define RRR_W (RRR_R & 0x3)
798
799 #define RRRN_R RRR_R
800 #define RRRN_S RRR_S
801 #define RRRN_T RRR_T
802
803 #define RRI8_R RRR_R
804 #define RRI8_S RRR_S
805 #define RRI8_T RRR_T
806 #define RRI8_IMM8 (b2)
807 #define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8)
808
809 #ifdef TARGET_WORDS_BIGENDIAN
810 #define RI16_IMM16 (((b1) << 8) | (b2))
811 #else
812 #define RI16_IMM16 (((b2) << 8) | (b1))
813 #endif
814
815 #ifdef TARGET_WORDS_BIGENDIAN
816 #define CALL_N (((b0) & 0xc) >> 2)
817 #define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2))
818 #else
819 #define CALL_N (((b0) & 0x30) >> 4)
820 #define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10))
821 #endif
822 #define CALL_OFFSET_SE \
823 (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET)
824
825 #define CALLX_N CALL_N
826 #ifdef TARGET_WORDS_BIGENDIAN
827 #define CALLX_M ((b0) & 0x3)
828 #else
829 #define CALLX_M (((b0) & 0xc0) >> 6)
830 #endif
831 #define CALLX_S RRR_S
832
833 #define BRI12_M CALLX_M
834 #define BRI12_S RRR_S
835 #ifdef TARGET_WORDS_BIGENDIAN
836 #define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2))
837 #else
838 #define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4))
839 #endif
840 #define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12)
841
842 #define BRI8_M BRI12_M
843 #define BRI8_R RRI8_R
844 #define BRI8_S RRI8_S
845 #define BRI8_IMM8 RRI8_IMM8
846 #define BRI8_IMM8_SE RRI8_IMM8_SE
847
848 #define RSR_SR (b1)
849
850 uint8_t b0 = ldub_code(dc->pc);
851 uint8_t b1 = ldub_code(dc->pc + 1);
852 uint8_t b2 = 0;
853
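/*
 * Encoded branch immediates: B4CONST is used by BEQI/BNEI/BLTI/BGEI and
 * B4CONSTU by BLTUI/BGEUI; the 4-bit R field of the instruction selects
 * one of these constants.
 */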
854 static const uint32_t B4CONST[] = {
855 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
856 };
857
858 static const uint32_t B4CONSTU[] = {
859 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
860 };
861
862 if (OP0 >= 8) {
863 dc->next_pc = dc->pc + 2;
864 HAS_OPTION(XTENSA_OPTION_CODE_DENSITY);
865 } else {
866 dc->next_pc = dc->pc + 3;
867 b2 = ldub_code(dc->pc + 2);
868 }
869
870 switch (OP0) {
871 case 0: /*QRST*/
872 switch (OP1) {
873 case 0: /*RST0*/
874 switch (OP2) {
875 case 0: /*ST0*/
876 if ((RRR_R & 0xc) == 0x8) {
877 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
878 }
879
880 switch (RRR_R) {
881 case 0: /*SNM0*/
882 switch (CALLX_M) {
883 case 0: /*ILL*/
884 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
885 break;
886
887 case 1: /*reserved*/
888 RESERVED();
889 break;
890
891 case 2: /*JR*/
892 switch (CALLX_N) {
893 case 0: /*RET*/
894 case 2: /*JX*/
895 gen_window_check1(dc, CALLX_S);
896 gen_jump(dc, cpu_R[CALLX_S]);
897 break;
898
899 case 1: /*RETWw*/
900 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
901 {
902 TCGv_i32 tmp = tcg_const_i32(dc->pc);
903 gen_advance_ccount(dc);
904 gen_helper_retw(tmp, tmp);
905 gen_jump(dc, tmp);
906 tcg_temp_free(tmp);
907 }
908 break;
909
910 case 3: /*reserved*/
911 RESERVED();
912 break;
913 }
914 break;
915
916 case 3: /*CALLX*/
917 gen_window_check2(dc, CALLX_S, CALLX_N << 2);
918 switch (CALLX_N) {
919 case 0: /*CALLX0*/
920 {
921 TCGv_i32 tmp = tcg_temp_new_i32();
922 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
923 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
924 gen_jump(dc, tmp);
925 tcg_temp_free(tmp);
926 }
927 break;
928
929 case 1: /*CALLX4w*/
930 case 2: /*CALLX8w*/
931 case 3: /*CALLX12w*/
932 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
933 {
934 TCGv_i32 tmp = tcg_temp_new_i32();
935
936 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
937 gen_callw(dc, CALLX_N, tmp);
938 tcg_temp_free(tmp);
939 }
940 break;
941 }
942 break;
943 }
944 break;
945
946 case 1: /*MOVSPw*/
947 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
948 gen_window_check2(dc, RRR_T, RRR_S);
949 {
950 TCGv_i32 pc = tcg_const_i32(dc->pc);
951 gen_advance_ccount(dc);
952 gen_helper_movsp(pc);
953 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]);
954 tcg_temp_free(pc);
955 }
956 break;
957
958 case 2: /*SYNC*/
959 switch (RRR_T) {
960 case 0: /*ISYNC*/
961 break;
962
963 case 1: /*RSYNC*/
964 break;
965
966 case 2: /*ESYNC*/
967 break;
968
969 case 3: /*DSYNC*/
970 break;
971
972 case 8: /*EXCW*/
973 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
974 break;
975
976 case 12: /*MEMW*/
977 break;
978
979 case 13: /*EXTW*/
980 break;
981
982 case 15: /*NOP*/
983 break;
984
985 default: /*reserved*/
986 RESERVED();
987 break;
988 }
989 break;
990
991 case 3: /*RFEIx*/
992 switch (RRR_T) {
993 case 0: /*RFETx*/
994 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
995 switch (RRR_S) {
996 case 0: /*RFEx*/
997 gen_check_privilege(dc);
998 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
999 gen_helper_check_interrupts(cpu_env);
1000 gen_jump(dc, cpu_SR[EPC1]);
1001 break;
1002
1003 case 1: /*RFUEx*/
1004 RESERVED();
1005 break;
1006
1007 case 2: /*RFDEx*/
1008 gen_check_privilege(dc);
1009 gen_jump(dc, cpu_SR[
1010 dc->config->ndepc ? DEPC : EPC1]);
1011 break;
1012
1013 case 4: /*RFWOw*/
1014 case 5: /*RFWUw*/
1015 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1016 gen_check_privilege(dc);
1017 {
1018 TCGv_i32 tmp = tcg_const_i32(1);
1019
1020 tcg_gen_andi_i32(
1021 cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
1022 tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
1023
1024 if (RRR_S == 4) {
1025 tcg_gen_andc_i32(cpu_SR[WINDOW_START],
1026 cpu_SR[WINDOW_START], tmp);
1027 } else {
1028 tcg_gen_or_i32(cpu_SR[WINDOW_START],
1029 cpu_SR[WINDOW_START], tmp);
1030 }
1031
1032 gen_helper_restore_owb();
1033 gen_helper_check_interrupts(cpu_env);
1034 gen_jump(dc, cpu_SR[EPC1]);
1035
1036 tcg_temp_free(tmp);
1037 }
1038 break;
1039
1040 default: /*reserved*/
1041 RESERVED();
1042 break;
1043 }
1044 break;
1045
1046 case 1: /*RFIx*/
1047 HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT);
1048 if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) {
1049 gen_check_privilege(dc);
1050 tcg_gen_mov_i32(cpu_SR[PS],
1051 cpu_SR[EPS2 + RRR_S - 2]);
1052 gen_helper_check_interrupts(cpu_env);
1053 gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
1054 } else {
1055 qemu_log("RFI %d is illegal\n", RRR_S);
1056 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1057 }
1058 break;
1059
1060 case 2: /*RFME*/
1061 TBD();
1062 break;
1063
1064 default: /*reserved*/
1065 RESERVED();
1066 break;
1067
1068 }
1069 break;
1070
1071 case 4: /*BREAKx*/
1072 HAS_OPTION(XTENSA_OPTION_DEBUG);
1073 if (dc->debug) {
1074 gen_debug_exception(dc, DEBUGCAUSE_BI);
1075 }
1076 break;
1077
1078 case 5: /*SYSCALLx*/
1079 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
1080 switch (RRR_S) {
1081 case 0: /*SYSCALLx*/
1082 gen_exception_cause(dc, SYSCALL_CAUSE);
1083 break;
1084
1085 case 1: /*SIMCALL*/
1086 if (semihosting_enabled) {
1087 gen_check_privilege(dc);
1088 gen_helper_simcall(cpu_env);
1089 } else {
1090 qemu_log("SIMCALL but semihosting is disabled\n");
1091 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1092 }
1093 break;
1094
1095 default:
1096 RESERVED();
1097 break;
1098 }
1099 break;
1100
1101 case 6: /*RSILx*/
1102 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1103 gen_check_privilege(dc);
1104 gen_window_check1(dc, RRR_T);
1105 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]);
1106 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
1107 tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S);
1108 gen_helper_check_interrupts(cpu_env);
1109 gen_jumpi_check_loop_end(dc, 0);
1110 break;
1111
1112 case 7: /*WAITIx*/
1113 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1114 gen_check_privilege(dc);
1115 gen_waiti(dc, RRR_S);
1116 break;
1117
1118 case 8: /*ANY4p*/
1119 case 9: /*ALL4p*/
1120 case 10: /*ANY8p*/
1121 case 11: /*ALL8p*/
1122 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1123 {
1124 const unsigned shift = (RRR_R & 2) ? 8 : 4;
1125 TCGv_i32 mask = tcg_const_i32(
1126 ((1 << shift) - 1) << RRR_S);
1127 TCGv_i32 tmp = tcg_temp_new_i32();
1128
1129 tcg_gen_and_i32(tmp, cpu_SR[BR], mask);
1130 if (RRR_R & 1) { /*ALL*/
1131 tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S);
1132 } else { /*ANY*/
1133 tcg_gen_add_i32(tmp, tmp, mask);
1134 }
1135 tcg_gen_shri_i32(tmp, tmp, RRR_S + shift);
1136 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR],
1137 tmp, RRR_T, 1);
1138 tcg_temp_free(mask);
1139 tcg_temp_free(tmp);
1140 }
1141 break;
1142
1143 default: /*reserved*/
1144 RESERVED();
1145 break;
1146
1147 }
1148 break;
1149
1150 case 1: /*AND*/
1151 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1152 tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1153 break;
1154
1155 case 2: /*OR*/
1156 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1157 tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1158 break;
1159
1160 case 3: /*XOR*/
1161 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1162 tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1163 break;
1164
1165 case 4: /*ST1*/
1166 switch (RRR_R) {
1167 case 0: /*SSR*/
1168 gen_window_check1(dc, RRR_S);
1169 gen_right_shift_sar(dc, cpu_R[RRR_S]);
1170 break;
1171
1172 case 1: /*SSL*/
1173 gen_window_check1(dc, RRR_S);
1174 gen_left_shift_sar(dc, cpu_R[RRR_S]);
1175 break;
1176
1177 case 2: /*SSA8L*/
1178 gen_window_check1(dc, RRR_S);
1179 {
1180 TCGv_i32 tmp = tcg_temp_new_i32();
1181 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1182 gen_right_shift_sar(dc, tmp);
1183 tcg_temp_free(tmp);
1184 }
1185 break;
1186
1187 case 3: /*SSA8B*/
1188 gen_window_check1(dc, RRR_S);
1189 {
1190 TCGv_i32 tmp = tcg_temp_new_i32();
1191 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1192 gen_left_shift_sar(dc, tmp);
1193 tcg_temp_free(tmp);
1194 }
1195 break;
1196
1197 case 4: /*SSAI*/
1198 {
1199 TCGv_i32 tmp = tcg_const_i32(
1200 RRR_S | ((RRR_T & 1) << 4));
1201 gen_right_shift_sar(dc, tmp);
1202 tcg_temp_free(tmp);
1203 }
1204 break;
1205
1206 case 6: /*RER*/
1207 TBD();
1208 break;
1209
1210 case 7: /*WER*/
1211 TBD();
1212 break;
1213
1214 case 8: /*ROTWw*/
1215 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1216 gen_check_privilege(dc);
1217 {
1218 TCGv_i32 tmp = tcg_const_i32(
1219 RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0));
1220 gen_helper_rotw(tmp);
1221 tcg_temp_free(tmp);
1222 reset_used_window(dc);
1223 }
1224 break;
1225
1226 case 14: /*NSAu*/
1227 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1228 gen_window_check2(dc, RRR_S, RRR_T);
1229 gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
1230 break;
1231
1232 case 15: /*NSAUu*/
1233 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1234 gen_window_check2(dc, RRR_S, RRR_T);
1235 gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
1236 break;
1237
1238 default: /*reserved*/
1239 RESERVED();
1240 break;
1241 }
1242 break;
1243
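/*
 * TLB management ops: bit 3 of the R field selects the data TLB (RDTLB*,
 * IDTLB, PDTLB, WDTLB) rather than the instruction TLB.  Writes and
 * invalidates may change the current mapping, so the TB is terminated
 * afterwards.
 */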
1244 case 5: /*TLB*/
1245 HAS_OPTION_BITS(
1246 XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
1247 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
1248 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION));
1249 gen_check_privilege(dc);
1250 gen_window_check2(dc, RRR_S, RRR_T);
1251 {
1252 TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0);
1253
1254 switch (RRR_R & 7) {
1255 case 3: /*RITLB0*/ /*RDTLB0*/
1256 gen_helper_rtlb0(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1257 break;
1258
1259 case 4: /*IITLB*/ /*IDTLB*/
1260 gen_helper_itlb(cpu_R[RRR_S], dtlb);
1261 /* This could change memory mapping, so exit tb */
1262 gen_jumpi_check_loop_end(dc, -1);
1263 break;
1264
1265 case 5: /*PITLB*/ /*PDTLB*/
1266 tcg_gen_movi_i32(cpu_pc, dc->pc);
1267 gen_helper_ptlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1268 break;
1269
1270 case 6: /*WITLB*/ /*WDTLB*/
1271 gen_helper_wtlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1272 /* This could change memory mapping, so exit tb */
1273 gen_jumpi_check_loop_end(dc, -1);
1274 break;
1275
1276 case 7: /*RITLB1*/ /*RDTLB1*/
1277 gen_helper_rtlb1(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1278 break;
1279
1280 default:
1281 tcg_temp_free(dtlb);
1282 RESERVED();
1283 break;
1284 }
1285 tcg_temp_free(dtlb);
1286 }
1287 break;
1288
1289 case 6: /*RT0*/
1290 gen_window_check2(dc, RRR_R, RRR_T);
1291 switch (RRR_S) {
1292 case 0: /*NEG*/
1293 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1294 break;
1295
1296 case 1: /*ABS*/
1297 {
1298 int label = gen_new_label();
1299 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1300 tcg_gen_brcondi_i32(
1301 TCG_COND_GE, cpu_R[RRR_R], 0, label);
1302 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1303 gen_set_label(label);
1304 }
1305 break;
1306
1307 default: /*reserved*/
1308 RESERVED();
1309 break;
1310 }
1311 break;
1312
1313 case 7: /*reserved*/
1314 RESERVED();
1315 break;
1316
1317 case 8: /*ADD*/
1318 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1319 tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1320 break;
1321
1322 case 9: /*ADD**/
1323 case 10:
1324 case 11:
1325 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1326 {
1327 TCGv_i32 tmp = tcg_temp_new_i32();
1328 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
1329 tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1330 tcg_temp_free(tmp);
1331 }
1332 break;
1333
1334 case 12: /*SUB*/
1335 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1336 tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1337 break;
1338
1339 case 13: /*SUB**/
1340 case 14:
1341 case 15:
1342 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1343 {
1344 TCGv_i32 tmp = tcg_temp_new_i32();
1345 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
1346 tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1347 tcg_temp_free(tmp);
1348 }
1349 break;
1350 }
1351 break;
1352
1353 case 1: /*RST1*/
1354 switch (OP2) {
1355 case 0: /*SLLI*/
1356 case 1:
1357 gen_window_check2(dc, RRR_R, RRR_S);
1358 tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S],
1359 32 - (RRR_T | ((OP2 & 1) << 4)));
1360 break;
1361
1362 case 2: /*SRAI*/
1363 case 3:
1364 gen_window_check2(dc, RRR_R, RRR_T);
1365 tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T],
1366 RRR_S | ((OP2 & 1) << 4));
1367 break;
1368
1369 case 4: /*SRLI*/
1370 gen_window_check2(dc, RRR_R, RRR_T);
1371 tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S);
1372 break;
1373
1374 case 6: /*XSR*/
1375 {
1376 TCGv_i32 tmp = tcg_temp_new_i32();
1377 if (RSR_SR >= 64) {
1378 gen_check_privilege(dc);
1379 }
1380 gen_window_check1(dc, RRR_T);
1381 tcg_gen_mov_i32(tmp, cpu_R[RRR_T]);
1382 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1383 gen_wsr(dc, RSR_SR, tmp);
1384 tcg_temp_free(tmp);
1385 if (!sregnames[RSR_SR]) {
1386 TBD();
1387 }
1388 }
1389 break;
1390
1391 /*
1392 * Note: 64 bit ops are used here solely because SAR values
1393 * have range 0..63
1394 */
1395 #define gen_shift_reg(cmd, reg) do { \
1396 TCGv_i64 tmp = tcg_temp_new_i64(); \
1397 tcg_gen_extu_i32_i64(tmp, reg); \
1398 tcg_gen_##cmd##_i64(v, v, tmp); \
1399 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
1400 tcg_temp_free_i64(v); \
1401 tcg_temp_free_i64(tmp); \
1402 } while (0)
1403
1404 #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
1405
1406 case 8: /*SRC*/
1407 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1408 {
1409 TCGv_i64 v = tcg_temp_new_i64();
1410 tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]);
1411 gen_shift(shr);
1412 }
1413 break;
1414
1415 case 9: /*SRL*/
1416 gen_window_check2(dc, RRR_R, RRR_T);
1417 if (dc->sar_5bit) {
1418 tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1419 } else {
1420 TCGv_i64 v = tcg_temp_new_i64();
1421 tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]);
1422 gen_shift(shr);
1423 }
1424 break;
1425
1426 case 10: /*SLL*/
1427 gen_window_check2(dc, RRR_R, RRR_S);
1428 if (dc->sar_m32_5bit) {
1429 tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32);
1430 } else {
1431 TCGv_i64 v = tcg_temp_new_i64();
1432 TCGv_i32 s = tcg_const_i32(32);
1433 tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
1434 tcg_gen_andi_i32(s, s, 0x3f);
1435 tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]);
1436 gen_shift_reg(shl, s);
1437 tcg_temp_free(s);
1438 }
1439 break;
1440
1441 case 11: /*SRA*/
1442 gen_window_check2(dc, RRR_R, RRR_T);
1443 if (dc->sar_5bit) {
1444 tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1445 } else {
1446 TCGv_i64 v = tcg_temp_new_i64();
1447 tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]);
1448 gen_shift(sar);
1449 }
1450 break;
1451 #undef gen_shift
1452 #undef gen_shift_reg
1453
1454 case 12: /*MUL16U*/
1455 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1456 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1457 {
1458 TCGv_i32 v1 = tcg_temp_new_i32();
1459 TCGv_i32 v2 = tcg_temp_new_i32();
1460 tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]);
1461 tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]);
1462 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1463 tcg_temp_free(v2);
1464 tcg_temp_free(v1);
1465 }
1466 break;
1467
1468 case 13: /*MUL16S*/
1469 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1470 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1471 {
1472 TCGv_i32 v1 = tcg_temp_new_i32();
1473 TCGv_i32 v2 = tcg_temp_new_i32();
1474 tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]);
1475 tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]);
1476 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1477 tcg_temp_free(v2);
1478 tcg_temp_free(v1);
1479 }
1480 break;
1481
1482 default: /*reserved*/
1483 RESERVED();
1484 break;
1485 }
1486 break;
1487
1488 case 2: /*RST2*/
1489 if (OP2 >= 8) {
1490 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1491 }
1492
1493 if (OP2 >= 12) {
1494 HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV);
1495 int label = gen_new_label();
1496 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
1497 gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
1498 gen_set_label(label);
1499 }
1500
1501 switch (OP2) {
1502 #define BOOLEAN_LOGIC(fn, r, s, t) \
1503 do { \
1504 HAS_OPTION(XTENSA_OPTION_BOOLEAN); \
1505 TCGv_i32 tmp1 = tcg_temp_new_i32(); \
1506 TCGv_i32 tmp2 = tcg_temp_new_i32(); \
1507 \
1508 tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \
1509 tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \
1510 tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \
1511 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \
1512 tcg_temp_free(tmp1); \
1513 tcg_temp_free(tmp2); \
1514 } while (0)
1515
1516 case 0: /*ANDBp*/
1517 BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T);
1518 break;
1519
1520 case 1: /*ANDBCp*/
1521 BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T);
1522 break;
1523
1524 case 2: /*ORBp*/
1525 BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T);
1526 break;
1527
1528 case 3: /*ORBCp*/
1529 BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T);
1530 break;
1531
1532 case 4: /*XORBp*/
1533 BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T);
1534 break;
1535
1536 #undef BOOLEAN_LOGIC
1537
1538 case 8: /*MULLi*/
1539 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL);
1540 tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1541 break;
1542
1543 case 10: /*MULUHi*/
1544 case 11: /*MULSHi*/
1545 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH);
1546 {
1547 TCGv_i64 r = tcg_temp_new_i64();
1548 TCGv_i64 s = tcg_temp_new_i64();
1549 TCGv_i64 t = tcg_temp_new_i64();
1550
1551 if (OP2 == 10) {
1552 tcg_gen_extu_i32_i64(s, cpu_R[RRR_S]);
1553 tcg_gen_extu_i32_i64(t, cpu_R[RRR_T]);
1554 } else {
1555 tcg_gen_ext_i32_i64(s, cpu_R[RRR_S]);
1556 tcg_gen_ext_i32_i64(t, cpu_R[RRR_T]);
1557 }
1558 tcg_gen_mul_i64(r, s, t);
1559 tcg_gen_shri_i64(r, r, 32);
1560 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], r);
1561
1562 tcg_temp_free_i64(r);
1563 tcg_temp_free_i64(s);
1564 tcg_temp_free_i64(t);
1565 }
1566 break;
1567
1568 case 12: /*QUOUi*/
1569 tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1570 break;
1571
1572 case 13: /*QUOSi*/
1573 case 15: /*REMSi*/
1574 {
1575 int label1 = gen_new_label();
1576 int label2 = gen_new_label();
1577
1578 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
1579 label1);
1580 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
1581 label1);
1582 tcg_gen_movi_i32(cpu_R[RRR_R],
1583 OP2 == 13 ? 0x80000000 : 0);
1584 tcg_gen_br(label2);
1585 gen_set_label(label1);
1586 if (OP2 == 13) {
1587 tcg_gen_div_i32(cpu_R[RRR_R],
1588 cpu_R[RRR_S], cpu_R[RRR_T]);
1589 } else {
1590 tcg_gen_rem_i32(cpu_R[RRR_R],
1591 cpu_R[RRR_S], cpu_R[RRR_T]);
1592 }
1593 gen_set_label(label2);
1594 }
1595 break;
1596
1597 case 14: /*REMUi*/
1598 tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1599 break;
1600
1601 default: /*reserved*/
1602 RESERVED();
1603 break;
1604 }
1605 break;
1606
1607 case 3: /*RST3*/
1608 switch (OP2) {
1609 case 0: /*RSR*/
1610 if (RSR_SR >= 64) {
1611 gen_check_privilege(dc);
1612 }
1613 gen_window_check1(dc, RRR_T);
1614 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1615 if (!sregnames[RSR_SR]) {
1616 TBD();
1617 }
1618 break;
1619
1620 case 1: /*WSR*/
1621 if (RSR_SR >= 64) {
1622 gen_check_privilege(dc);
1623 }
1624 gen_window_check1(dc, RRR_T);
1625 gen_wsr(dc, RSR_SR, cpu_R[RRR_T]);
1626 if (!sregnames[RSR_SR]) {
1627 TBD();
1628 }
1629 break;
1630
1631 case 2: /*SEXTu*/
1632 HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT);
1633 gen_window_check2(dc, RRR_R, RRR_S);
1634 {
1635 int shift = 24 - RRR_T;
1636
1637 if (shift == 24) {
1638 tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1639 } else if (shift == 16) {
1640 tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1641 } else {
1642 TCGv_i32 tmp = tcg_temp_new_i32();
1643 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift);
1644 tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift);
1645 tcg_temp_free(tmp);
1646 }
1647 }
1648 break;
1649
1650 case 3: /*CLAMPSu*/
1651 HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS);
1652 gen_window_check2(dc, RRR_R, RRR_S);
1653 {
1654 TCGv_i32 tmp1 = tcg_temp_new_i32();
1655 TCGv_i32 tmp2 = tcg_temp_new_i32();
1656 int label = gen_new_label();
1657
1658 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
1659 tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]);
1660 tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7));
1661 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1662 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp2, 0, label);
1663
1664 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
1665 tcg_gen_xori_i32(cpu_R[RRR_R], tmp1,
1666 0xffffffff >> (25 - RRR_T));
1667
1668 gen_set_label(label);
1669
1670 tcg_temp_free(tmp1);
1671 tcg_temp_free(tmp2);
1672 }
1673 break;
1674
1675 case 4: /*MINu*/
1676 case 5: /*MAXu*/
1677 case 6: /*MINUu*/
1678 case 7: /*MAXUu*/
1679 HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX);
1680 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1681 {
1682 static const TCGCond cond[] = {
1683 TCG_COND_LE,
1684 TCG_COND_GE,
1685 TCG_COND_LEU,
1686 TCG_COND_GEU
1687 };
1688 int label = gen_new_label();
1689
1690 if (RRR_R != RRR_T) {
1691 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1692 tcg_gen_brcond_i32(cond[OP2 - 4],
1693 cpu_R[RRR_S], cpu_R[RRR_T], label);
1694 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1695 } else {
1696 tcg_gen_brcond_i32(cond[OP2 - 4],
1697 cpu_R[RRR_T], cpu_R[RRR_S], label);
1698 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1699 }
1700 gen_set_label(label);
1701 }
1702 break;
1703
1704 case 8: /*MOVEQZ*/
1705 case 9: /*MOVNEZ*/
1706 case 10: /*MOVLTZ*/
1707 case 11: /*MOVGEZ*/
1708 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1709 {
1710 static const TCGCond cond[] = {
1711 TCG_COND_NE,
1712 TCG_COND_EQ,
1713 TCG_COND_GE,
1714 TCG_COND_LT
1715 };
1716 int label = gen_new_label();
1717 tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label);
1718 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1719 gen_set_label(label);
1720 }
1721 break;
1722
1723 case 12: /*MOVFp*/
1724 case 13: /*MOVTp*/
1725 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1726 gen_window_check2(dc, RRR_R, RRR_S);
1727 {
1728 int label = gen_new_label();
1729 TCGv_i32 tmp = tcg_temp_new_i32();
1730
1731 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
1732 tcg_gen_brcondi_i32(
1733 OP2 & 1 ? TCG_COND_EQ : TCG_COND_NE,
1734 tmp, 0, label);
1735 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1736 gen_set_label(label);
1737 tcg_temp_free(tmp);
1738 }
1739 break;
1740
1741 case 14: /*RUR*/
1742 gen_window_check1(dc, RRR_R);
1743 {
1744 int st = (RRR_S << 4) + RRR_T;
1745 if (uregnames[st]) {
1746 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
1747 } else {
1748 qemu_log("RUR %d not implemented, ", st);
1749 TBD();
1750 }
1751 }
1752 break;
1753
1754 case 15: /*WUR*/
1755 gen_window_check1(dc, RRR_T);
1756 {
1757 if (uregnames[RSR_SR]) {
1758 tcg_gen_mov_i32(cpu_UR[RSR_SR], cpu_R[RRR_T]);
1759 } else {
1760 qemu_log("WUR %d not implemented, ", RSR_SR);
1761 TBD();
1762 }
1763 }
1764 break;
1765
1766 }
1767 break;
1768
1769 case 4: /*EXTUI*/
1770 case 5:
1771 gen_window_check2(dc, RRR_R, RRR_T);
1772 {
1773 int shiftimm = RRR_S | (OP1 << 4);
1774 int maskimm = (1 << (OP2 + 1)) - 1;
1775
1776 TCGv_i32 tmp = tcg_temp_new_i32();
1777 tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm);
1778 tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm);
1779 tcg_temp_free(tmp);
1780 }
1781 break;
1782
1783 case 6: /*CUST0*/
1784 RESERVED();
1785 break;
1786
1787 case 7: /*CUST1*/
1788 RESERVED();
1789 break;
1790
1791 case 8: /*LSCXp*/
1792 HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
1793 TBD();
1794 break;
1795
1796 case 9: /*LSC4*/
1797 gen_window_check2(dc, RRR_S, RRR_T);
1798 switch (OP2) {
1799 case 0: /*L32E*/
1800 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1801 gen_check_privilege(dc);
1802 {
1803 TCGv_i32 addr = tcg_temp_new_i32();
1804 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1805 (0xffffffc0 | (RRR_R << 2)));
1806 tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring);
1807 tcg_temp_free(addr);
1808 }
1809 break;
1810
1811 case 4: /*S32E*/
1812 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1813 gen_check_privilege(dc);
1814 {
1815 TCGv_i32 addr = tcg_temp_new_i32();
1816 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1817 (0xffffffc0 | (RRR_R << 2)));
1818 tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring);
1819 tcg_temp_free(addr);
1820 }
1821 break;
1822
1823 default:
1824 RESERVED();
1825 break;
1826 }
1827 break;
1828
1829 case 10: /*FP0*/
1830 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1831 TBD();
1832 break;
1833
1834 case 11: /*FP1*/
1835 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1836 TBD();
1837 break;
1838
1839 default: /*reserved*/
1840 RESERVED();
1841 break;
1842 }
1843 break;
1844
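/*
 * L32R loads a 32-bit literal.  The 16-bit immediate is one-extended and
 * shifted left by 2, giving a negative word offset; it is applied to the
 * address of the following instruction rounded down to a word boundary
 * (PC-relative), or, when the LITBASE option is active, to the literal
 * base kept in dc->litbase.
 */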
1845 case 1: /*L32R*/
1846 gen_window_check1(dc, RRR_T);
1847 {
1848 TCGv_i32 tmp = tcg_const_i32(
1849 ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ?
1850 0 : ((dc->pc + 3) & ~3)) +
1851 (0xfffc0000 | (RI16_IMM16 << 2)));
1852
1853 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
1854 tcg_gen_add_i32(tmp, tmp, dc->litbase);
1855 }
1856 tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring);
1857 tcg_temp_free(tmp);
1858 }
1859 break;
1860
1861 case 2: /*LSAI*/
1862 #define gen_load_store(type, shift) do { \
1863 TCGv_i32 addr = tcg_temp_new_i32(); \
1864 gen_window_check2(dc, RRI8_S, RRI8_T); \
1865 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
1866 if (shift) { \
1867 gen_load_store_alignment(dc, shift, addr, false); \
1868 } \
1869 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
1870 tcg_temp_free(addr); \
1871 } while (0)
1872
1873 switch (RRI8_R) {
1874 case 0: /*L8UI*/
1875 gen_load_store(ld8u, 0);
1876 break;
1877
1878 case 1: /*L16UI*/
1879 gen_load_store(ld16u, 1);
1880 break;
1881
1882 case 2: /*L32I*/
1883 gen_load_store(ld32u, 2);
1884 break;
1885
1886 case 4: /*S8I*/
1887 gen_load_store(st8, 0);
1888 break;
1889
1890 case 5: /*S16I*/
1891 gen_load_store(st16, 1);
1892 break;
1893
1894 case 6: /*S32I*/
1895 gen_load_store(st32, 2);
1896 break;
1897
1898 case 7: /*CACHEc*/
1899 if (RRI8_T < 8) {
1900 HAS_OPTION(XTENSA_OPTION_DCACHE);
1901 }
1902
1903 switch (RRI8_T) {
1904 case 0: /*DPFRc*/
1905 break;
1906
1907 case 1: /*DPFWc*/
1908 break;
1909
1910 case 2: /*DPFROc*/
1911 break;
1912
1913 case 3: /*DPFWOc*/
1914 break;
1915
1916 case 4: /*DHWBc*/
1917 break;
1918
1919 case 5: /*DHWBIc*/
1920 break;
1921
1922 case 6: /*DHIc*/
1923 break;
1924
1925 case 7: /*DIIc*/
1926 break;
1927
1928 case 8: /*DCEc*/
1929 switch (OP1) {
1930 case 0: /*DPFLl*/
1931 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1932 break;
1933
1934 case 2: /*DHUl*/
1935 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1936 break;
1937
1938 case 3: /*DIUl*/
1939 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1940 break;
1941
1942 case 4: /*DIWBc*/
1943 HAS_OPTION(XTENSA_OPTION_DCACHE);
1944 break;
1945
1946 case 5: /*DIWBIc*/
1947 HAS_OPTION(XTENSA_OPTION_DCACHE);
1948 break;
1949
1950 default: /*reserved*/
1951 RESERVED();
1952 break;
1953
1954 }
1955 break;
1956
1957 case 12: /*IPFc*/
1958 HAS_OPTION(XTENSA_OPTION_ICACHE);
1959 break;
1960
1961 case 13: /*ICEc*/
1962 switch (OP1) {
1963 case 0: /*IPFLl*/
1964 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1965 break;
1966
1967 case 2: /*IHUl*/
1968 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1969 break;
1970
1971 case 3: /*IIUl*/
1972 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1973 break;
1974
1975 default: /*reserved*/
1976 RESERVED();
1977 break;
1978 }
1979 break;
1980
1981 case 14: /*IHIc*/
1982 HAS_OPTION(XTENSA_OPTION_ICACHE);
1983 break;
1984
1985 case 15: /*IIIc*/
1986 HAS_OPTION(XTENSA_OPTION_ICACHE);
1987 break;
1988
1989 default: /*reserved*/
1990 RESERVED();
1991 break;
1992 }
1993 break;
1994
1995 case 9: /*L16SI*/
1996 gen_load_store(ld16s, 1);
1997 break;
1998 #undef gen_load_store
1999
2000 case 10: /*MOVI*/
2001 gen_window_check1(dc, RRI8_T);
2002 tcg_gen_movi_i32(cpu_R[RRI8_T],
2003 RRI8_IMM8 | (RRI8_S << 8) |
2004 ((RRI8_S & 0x8) ? 0xfffff000 : 0));
2005 break;
2006
2007 #define gen_load_store_no_hw_align(type) do { \
2008 TCGv_i32 addr = tcg_temp_local_new_i32(); \
2009 gen_window_check2(dc, RRI8_S, RRI8_T); \
2010 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \
2011 gen_load_store_alignment(dc, 2, addr, true); \
2012 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
2013 tcg_temp_free(addr); \
2014 } while (0)
2015
2016 case 11: /*L32AIy*/
2017 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
2018 gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/
2019 break;
2020
2021 case 12: /*ADDI*/
2022 gen_window_check2(dc, RRI8_S, RRI8_T);
2023 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE);
2024 break;
2025
2026 case 13: /*ADDMI*/
2027 gen_window_check2(dc, RRI8_S, RRI8_T);
2028 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8);
2029 break;
2030
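/*
 * S32C1I (conditional store): load the word at the address, compare it
 * with SCOMPARE1 and store the original AT value back only on a match;
 * the loaded value is always left in AT so software can retry the
 * compare-and-swap loop.
 */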
2031 case 14: /*S32C1Iy*/
2032 HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE);
2033 gen_window_check2(dc, RRI8_S, RRI8_T);
2034 {
2035 int label = gen_new_label();
2036 TCGv_i32 tmp = tcg_temp_local_new_i32();
2037 TCGv_i32 addr = tcg_temp_local_new_i32();
2038
2039 tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
2040 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
2041 gen_load_store_alignment(dc, 2, addr, true);
2042 tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring);
2043 tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
2044 cpu_SR[SCOMPARE1], label);
2045
2046 tcg_gen_qemu_st32(tmp, addr, dc->cring);
2047
2048 gen_set_label(label);
2049 tcg_temp_free(addr);
2050 tcg_temp_free(tmp);
2051 }
2052 break;
2053
2054 case 15: /*S32RIy*/
2055 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
2056 gen_load_store_no_hw_align(st32); /*TODO release?*/
2057 break;
2058 #undef gen_load_store_no_hw_align
2059
2060 default: /*reserved*/
2061 RESERVED();
2062 break;
2063 }
2064 break;
2065
2066 case 3: /*LSCIp*/
2067 HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
2068 TBD();
2069 break;
2070
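/*
 * MAC16: OP1 bits 3:2 select the operation (UMUL/MUL/MULA/MULS), OP2
 * selects the operand sources (AR registers vs. the MR accumulator
 * inputs) and whether an LDINC/LDDEC auto-incrementing load of an MR
 * register is fused with it.  The 40-bit accumulator lives in
 * ACCLO/ACCHI, with ACCHI sign-extended to 8 bits.
 */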
2071 case 4: /*MAC16d*/
2072 HAS_OPTION(XTENSA_OPTION_MAC16);
2073 {
2074 enum {
2075 MAC16_UMUL = 0x0,
2076 MAC16_MUL = 0x4,
2077 MAC16_MULA = 0x8,
2078 MAC16_MULS = 0xc,
2079 MAC16_NONE = 0xf,
2080 } op = OP1 & 0xc;
2081 bool is_m1_sr = (OP2 & 0x3) == 2;
2082 bool is_m2_sr = (OP2 & 0xc) == 0;
2083 uint32_t ld_offset = 0;
2084
2085 if (OP2 > 9) {
2086 RESERVED();
2087 }
2088
2089 switch (OP2 & 2) {
2090 case 0: /*MACI?/MACC?*/
2091 is_m1_sr = true;
2092 ld_offset = (OP2 & 1) ? -4 : 4;
2093
2094 if (OP2 >= 8) { /*MACI/MACC*/
2095 if (OP1 == 0) { /*LDINC/LDDEC*/
2096 op = MAC16_NONE;
2097 } else {
2098 RESERVED();
2099 }
2100 } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/
2101 RESERVED();
2102 }
2103 break;
2104
2105 case 2: /*MACD?/MACA?*/
2106 if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/
2107 RESERVED();
2108 }
2109 break;
2110 }
2111
2112 if (op != MAC16_NONE) {
2113 if (!is_m1_sr) {
2114 gen_window_check1(dc, RRR_S);
2115 }
2116 if (!is_m2_sr) {
2117 gen_window_check1(dc, RRR_T);
2118 }
2119 }
2120
2121 {
2122 TCGv_i32 vaddr = tcg_temp_new_i32();
2123 TCGv_i32 mem32 = tcg_temp_new_i32();
2124
2125 if (ld_offset) {
2126 gen_window_check1(dc, RRR_S);
2127 tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset);
2128 gen_load_store_alignment(dc, 2, vaddr, false);
2129 tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
2130 }
2131 if (op != MAC16_NONE) {
2132 TCGv_i32 m1 = gen_mac16_m(
2133 is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S],
2134 OP1 & 1, op == MAC16_UMUL);
2135 TCGv_i32 m2 = gen_mac16_m(
2136 is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T],
2137 OP1 & 2, op == MAC16_UMUL);
2138
2139 if (op == MAC16_MUL || op == MAC16_UMUL) {
2140 tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
2141 if (op == MAC16_UMUL) {
2142 tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
2143 } else {
2144 tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
2145 }
2146 } else {
2147 TCGv_i32 res = tcg_temp_new_i32();
2148 TCGv_i64 res64 = tcg_temp_new_i64();
2149 TCGv_i64 tmp = tcg_temp_new_i64();
2150
2151 tcg_gen_mul_i32(res, m1, m2);
2152 tcg_gen_ext_i32_i64(res64, res);
2153 tcg_gen_concat_i32_i64(tmp,
2154 cpu_SR[ACCLO], cpu_SR[ACCHI]);
2155 if (op == MAC16_MULA) {
2156 tcg_gen_add_i64(tmp, tmp, res64);
2157 } else {
2158 tcg_gen_sub_i64(tmp, tmp, res64);
2159 }
2160 tcg_gen_trunc_i64_i32(cpu_SR[ACCLO], tmp);
2161 tcg_gen_shri_i64(tmp, tmp, 32);
2162 tcg_gen_trunc_i64_i32(cpu_SR[ACCHI], tmp);
2163 tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
2164
2165 tcg_temp_free(res);
2166 tcg_temp_free_i64(res64);
2167 tcg_temp_free_i64(tmp);
2168 }
2169 tcg_temp_free(m1);
2170 tcg_temp_free(m2);
2171 }
2172 if (ld_offset) {
2173 tcg_gen_mov_i32(cpu_R[RRR_S], vaddr);
2174 tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32);
2175 }
2176 tcg_temp_free(vaddr);
2177 tcg_temp_free(mem32);
2178 }
2179 }
2180 break;
2181
2182 case 5: /*CALLN*/
2183 switch (CALL_N) {
2184 case 0: /*CALL0*/
2185 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
2186 gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2187 break;
2188
2189 case 1: /*CALL4w*/
2190 case 2: /*CALL8w*/
2191 case 3: /*CALL12w*/
2192 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2193 gen_window_check1(dc, CALL_N << 2);
2194 gen_callwi(dc, CALL_N,
2195 (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2196 break;
2197 }
2198 break;
2199
2200 case 6: /*SI*/
2201 switch (CALL_N) {
2202 case 0: /*J*/
2203 gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0);
2204 break;
2205
2206 case 1: /*BZ*/
2207 gen_window_check1(dc, BRI12_S);
2208 {
2209 static const TCGCond cond[] = {
2210 TCG_COND_EQ, /*BEQZ*/
2211 TCG_COND_NE, /*BNEZ*/
2212 TCG_COND_LT, /*BLTZ*/
2213 TCG_COND_GE, /*BGEZ*/
2214 };
2215
2216 gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0,
2217 4 + BRI12_IMM12_SE);
2218 }
2219 break;
2220
2221 case 2: /*BI0*/
2222 gen_window_check1(dc, BRI8_S);
2223 {
2224 static const TCGCond cond[] = {
2225 TCG_COND_EQ, /*BEQI*/
2226 TCG_COND_NE, /*BNEI*/
2227 TCG_COND_LT, /*BLTI*/
2228 TCG_COND_GE, /*BGEI*/
2229 };
2230
2231 gen_brcondi(dc, cond[BRI8_M & 3],
2232 cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
2233 }
2234 break;
2235
2236 case 3: /*BI1*/
2237 switch (BRI8_M) {
2238 case 0: /*ENTRYw*/
2239 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
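/*
 * The actual frame setup for ENTRY (window rotation and stack-pointer
 * adjustment) lives in the entry helper; only pc, the s field and the
 * raw imm12 are marshalled here. The cached window-check state is
 * reset afterwards because the helper may change WINDOW_BASE.
 */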
2240 {
2241 TCGv_i32 pc = tcg_const_i32(dc->pc);
2242 TCGv_i32 s = tcg_const_i32(BRI12_S);
2243 TCGv_i32 imm = tcg_const_i32(BRI12_IMM12);
2244 gen_advance_ccount(dc);
2245 gen_helper_entry(pc, s, imm);
2246 tcg_temp_free(imm);
2247 tcg_temp_free(s);
2248 tcg_temp_free(pc);
2249 reset_used_window(dc);
2250 }
2251 break;
2252
2253 case 1: /*B1*/
2254 switch (BRI8_R) {
2255 case 0: /*BFp*/
2256 case 1: /*BTp*/
2257 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
2258 {
2259 TCGv_i32 tmp = tcg_temp_new_i32();
2260 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S);
2261 gen_brcondi(dc,
2262 BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ,
2263 tmp, 0, 4 + RRI8_IMM8_SE);
2264 tcg_temp_free(tmp);
2265 }
2266 break;
2267
2268 case 8: /*LOOP*/
2269 case 9: /*LOOPNEZ*/
2270 case 10: /*LOOPGTZ*/
2271 HAS_OPTION(XTENSA_OPTION_LOOP);
2272 gen_window_check1(dc, RRI8_S);
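/*
 * Zero-overhead loop setup: LCOUNT = AR[s] - 1, LBEG = the next
 * instruction, LEND = the loop end address. LOOPNEZ and LOOPGTZ
 * additionally branch straight to LEND when AR[s] is zero or not
 * greater than zero respectively, skipping the loop body entirely.
 */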
2273 {
2274 uint32_t lend = dc->pc + RRI8_IMM8 + 4;
2275 TCGv_i32 tmp = tcg_const_i32(lend);
2276
2277 tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1);
2278 tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc);
2279 gen_wsr_lend(dc, LEND, tmp);
2280 tcg_temp_free(tmp);
2281
2282 if (BRI8_R > 8) {
2283 int label = gen_new_label();
2284 tcg_gen_brcondi_i32(
2285 BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT,
2286 cpu_R[RRI8_S], 0, label);
2287 gen_jumpi(dc, lend, 1);
2288 gen_set_label(label);
2289 }
2290
2291 gen_jumpi(dc, dc->next_pc, 0);
2292 }
2293 break;
2294
2295 default: /*reserved*/
2296 RESERVED();
2297 break;
2298
2299 }
2300 break;
2301
2302 case 2: /*BLTUI*/
2303 case 3: /*BGEUI*/
2304 gen_window_check1(dc, BRI8_S);
2305 gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
2306 cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE);
2307 break;
2308 }
2309 break;
2310
2311 }
2312 break;
2313
2314 case 7: /*B*/
2315 {
2316 TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
2317
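/*
 * Bit 3 of the r field selects the negated sense of each branch
 * (BANY vs BNONE, BNALL vs BALL, BBS vs BBC, ...); for BEQ/BLT/BLTU
 * and their inverses the full r value indexes the condition table
 * below.
 */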
2318 switch (RRI8_R & 7) {
2319 case 0: /*BNONE*/ /*BANY*/
2320 gen_window_check2(dc, RRI8_S, RRI8_T);
2321 {
2322 TCGv_i32 tmp = tcg_temp_new_i32();
2323 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2324 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2325 tcg_temp_free(tmp);
2326 }
2327 break;
2328
2329 case 1: /*BEQ*/ /*BNE*/
2330 case 2: /*BLT*/ /*BGE*/
2331 case 3: /*BLTU*/ /*BGEU*/
2332 gen_window_check2(dc, RRI8_S, RRI8_T);
2333 {
2334 static const TCGCond cond[] = {
2335 [1] = TCG_COND_EQ,
2336 [2] = TCG_COND_LT,
2337 [3] = TCG_COND_LTU,
2338 [9] = TCG_COND_NE,
2339 [10] = TCG_COND_GE,
2340 [11] = TCG_COND_GEU,
2341 };
2342 gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T],
2343 4 + RRI8_IMM8_SE);
2344 }
2345 break;
2346
2347 case 4: /*BALL*/ /*BNALL*/
2348 gen_window_check2(dc, RRI8_S, RRI8_T);
2349 {
2350 TCGv_i32 tmp = tcg_temp_new_i32();
2351 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2352 gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T],
2353 4 + RRI8_IMM8_SE);
2354 tcg_temp_free(tmp);
2355 }
2356 break;
2357
2358 case 5: /*BBC*/ /*BBS*/
2359 gen_window_check2(dc, RRI8_S, RRI8_T);
2360 {
2361 TCGv_i32 bit = tcg_const_i32(1);
2362 TCGv_i32 tmp = tcg_temp_new_i32();
2363 tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
2364 tcg_gen_shl_i32(bit, bit, tmp);
2365 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
2366 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2367 tcg_temp_free(tmp);
2368 tcg_temp_free(bit);
2369 }
2370 break;
2371
2372 case 6: /*BBCI*/ /*BBSI*/
2373 case 7:
2374 gen_window_check1(dc, RRI8_S);
2375 {
2376 TCGv_i32 tmp = tcg_temp_new_i32();
2377 tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
2378 1 << (((RRI8_R & 1) << 4) | RRI8_T));
2379 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2380 tcg_temp_free(tmp);
2381 }
2382 break;
2383
2384 }
2385 }
2386 break;
2387
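/*
 * L32I.N and S32I.N (part of the code density option) share this
 * helper: the 4-bit immediate is scaled to a word offset and the
 * access gets the same alignment handling as the full-size loads
 * and stores.
 */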
2388 #define gen_narrow_load_store(type) do { \
2389 TCGv_i32 addr = tcg_temp_new_i32(); \
2390 gen_window_check2(dc, RRRN_S, RRRN_T); \
2391 tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
2392 gen_load_store_alignment(dc, 2, addr, false); \
2393 tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \
2394 tcg_temp_free(addr); \
2395 } while (0)
2396
2397 case 8: /*L32I.Nn*/
2398 gen_narrow_load_store(ld32u);
2399 break;
2400
2401 case 9: /*S32I.Nn*/
2402 gen_narrow_load_store(st32);
2403 break;
2404 #undef gen_narrow_load_store
2405
2406 case 10: /*ADD.Nn*/
2407 gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T);
2408 tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]);
2409 break;
2410
2411 case 11: /*ADDI.Nn*/
2412 gen_window_check2(dc, RRRN_R, RRRN_S);
2413 tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1);
2414 break;
2415
2416 case 12: /*ST2n*/
2417 gen_window_check1(dc, RRRN_S);
2418 if (RRRN_T < 8) { /*MOVI.Nn*/
2419 tcg_gen_movi_i32(cpu_R[RRRN_S],
2420 RRRN_R | (RRRN_T << 4) |
2421 ((RRRN_T & 6) == 6 ? 0xffffff80 : 0));
2422 } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/
2423 TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
2424
2425 gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
2426 4 + (RRRN_R | ((RRRN_T & 3) << 4)));
2427 }
2428 break;
2429
2430 case 13: /*ST3n*/
2431 switch (RRRN_R) {
2432 case 0: /*MOV.Nn*/
2433 gen_window_check2(dc, RRRN_S, RRRN_T);
2434 tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]);
2435 break;
2436
2437 case 15: /*S3*/
2438 switch (RRRN_T) {
2439 case 0: /*RET.Nn*/
2440 gen_jump(dc, cpu_R[0]);
2441 break;
2442
2443 case 1: /*RETW.Nn*/
2444 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2445 {
2446 TCGv_i32 tmp = tcg_const_i32(dc->pc);
2447 gen_advance_ccount(dc);
2448 gen_helper_retw(tmp, tmp);
2449 gen_jump(dc, tmp);
2450 tcg_temp_free(tmp);
2451 }
2452 break;
2453
2454 case 2: /*BREAK.Nn*/
2455 HAS_OPTION(XTENSA_OPTION_DEBUG);
2456 if (dc->debug) {
2457 gen_debug_exception(dc, DEBUGCAUSE_BN);
2458 }
2459 break;
2460
2461 case 3: /*NOP.Nn*/
2462 break;
2463
2464 case 6: /*ILL.Nn*/
2465 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2466 break;
2467
2468 default: /*reserved*/
2469 RESERVED();
2470 break;
2471 }
2472 break;
2473
2474 default: /*reserved*/
2475 RESERVED();
2476 break;
2477 }
2478 break;
2479
2480 default: /*reserved*/
2481 RESERVED();
2482 break;
2483 }
2484
2485 gen_check_loop_end(dc, 0);
2486 dc->pc = dc->next_pc;
2487
2488 return;
2489
2490 invalid_opcode:
2491 qemu_log("INVALID(pc = %08x)\n", dc->pc);
2492 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2493 #undef HAS_OPTION
2494 }
2495
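/*
 * QEMU-level breakpoints (e.g. set through the gdb stub): when the pc
 * being translated matches one, raise EXCP_DEBUG and end the TB here.
 */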
2496 static void check_breakpoint(CPUXtensaState *env, DisasContext *dc)
2497 {
2498 CPUBreakpoint *bp;
2499
2500 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
2501 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2502 if (bp->pc == dc->pc) {
2503 tcg_gen_movi_i32(cpu_pc, dc->pc);
2504 gen_exception(dc, EXCP_DEBUG);
2505 dc->is_jmp = DISAS_UPDATE;
2506 }
2507 }
2508 }
2509 }
2510
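/*
 * Architectural instruction breakpoints: if an enabled IBREAKA
 * register matches the pc being translated, emit a DEBUGCAUSE_IB
 * debug exception.
 */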
2511 static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
2512 {
2513 unsigned i;
2514
2515 for (i = 0; i < dc->config->nibreak; ++i) {
2516 if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
2517 env->sregs[IBREAKA + i] == dc->pc) {
2518 gen_debug_exception(dc, DEBUGCAUSE_IB);
2519 break;
2520 }
2521 }
2522 }
2523
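/*
 * Main per-TB translation loop. Instructions are translated until
 * control flow leaves the block, the per-TB instruction limit or the
 * page boundary is hit, or the opcode buffer fills up. When search_pc
 * is set, the guest pc and instruction count are recorded for every
 * emitted op so that restore_state_to_opc() can recover them later.
 */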
2524 static void gen_intermediate_code_internal(
2525 CPUXtensaState *env, TranslationBlock *tb, int search_pc)
2526 {
2527 DisasContext dc;
2528 int insn_count = 0;
2529 int j, lj = -1;
2530 uint16_t *gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2531 int max_insns = tb->cflags & CF_COUNT_MASK;
2532 uint32_t pc_start = tb->pc;
2533 uint32_t next_page_start =
2534 (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2535
2536 if (max_insns == 0) {
2537 max_insns = CF_COUNT_MASK;
2538 }
2539
2540 dc.config = env->config;
2541 dc.singlestep_enabled = env->singlestep_enabled;
2542 dc.tb = tb;
2543 dc.pc = pc_start;
2544 dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
2545 dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
2546 dc.lbeg = env->sregs[LBEG];
2547 dc.lend = env->sregs[LEND];
2548 dc.is_jmp = DISAS_NEXT;
2549 dc.ccount_delta = 0;
2550 dc.debug = tb->flags & XTENSA_TBFLAG_DEBUG;
2551 dc.icount = tb->flags & XTENSA_TBFLAG_ICOUNT;
2552
2553 init_litbase(&dc);
2554 init_sar_tracker(&dc);
2555 reset_used_window(&dc);
2556 if (dc.icount) {
2557 dc.next_icount = tcg_temp_local_new_i32();
2558 }
2559
2560 gen_icount_start();
2561
2562 if (env->singlestep_enabled && env->exception_taken) {
2563 env->exception_taken = 0;
2564 tcg_gen_movi_i32(cpu_pc, dc.pc);
2565 gen_exception(&dc, EXCP_DEBUG);
2566 }
2567
2568 do {
2569 check_breakpoint(env, &dc);
2570
2571 if (search_pc) {
2572 j = gen_opc_ptr - gen_opc_buf;
2573 if (lj < j) {
2574 lj++;
2575 while (lj < j) {
2576 gen_opc_instr_start[lj++] = 0;
2577 }
2578 }
2579 gen_opc_pc[lj] = dc.pc;
2580 gen_opc_instr_start[lj] = 1;
2581 gen_opc_icount[lj] = insn_count;
2582 }
2583
2584 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2585 tcg_gen_debug_insn_start(dc.pc);
2586 }
2587
2588 ++dc.ccount_delta;
2589
2590 if (insn_count + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2591 gen_io_start();
2592 }
2593
2594 if (dc.icount) {
2595 int label = gen_new_label();
2596
2597 tcg_gen_addi_i32(dc.next_icount, cpu_SR[ICOUNT], 1);
2598 tcg_gen_brcondi_i32(TCG_COND_NE, dc.next_icount, 0, label);
2599 tcg_gen_mov_i32(dc.next_icount, cpu_SR[ICOUNT]);
2600 if (dc.debug) {
2601 gen_debug_exception(&dc, DEBUGCAUSE_IC);
2602 }
2603 gen_set_label(label);
2604 }
2605
2606 if (dc.debug) {
2607 gen_ibreak_check(env, &dc);
2608 }
2609
2610 disas_xtensa_insn(&dc);
2611 ++insn_count;
2612 if (dc.icount) {
2613 tcg_gen_mov_i32(cpu_SR[ICOUNT], dc.next_icount);
2614 }
2615 if (env->singlestep_enabled) {
2616 tcg_gen_movi_i32(cpu_pc, dc.pc);
2617 gen_exception(&dc, EXCP_DEBUG);
2618 break;
2619 }
2620 } while (dc.is_jmp == DISAS_NEXT &&
2621 insn_count < max_insns &&
2622 dc.pc < next_page_start &&
2623 gen_opc_ptr < gen_opc_end);
2624
2625 reset_litbase(&dc);
2626 reset_sar_tracker(&dc);
2627 if (dc.icount) {
2628 tcg_temp_free(dc.next_icount);
2629 }
2630
2631 if (tb->cflags & CF_LAST_IO) {
2632 gen_io_end();
2633 }
2634
2635 if (dc.is_jmp == DISAS_NEXT) {
2636 gen_jumpi(&dc, dc.pc, 0);
2637 }
2638 gen_icount_end(tb, insn_count);
2639 *gen_opc_ptr = INDEX_op_end;
2640
2641 if (!search_pc) {
2642 tb->size = dc.pc - pc_start;
2643 tb->icount = insn_count;
2644 }
2645 }
2646
2647 void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb)
2648 {
2649 gen_intermediate_code_internal(env, tb, 0);
2650 }
2651
2652 void gen_intermediate_code_pc(CPUXtensaState *env, TranslationBlock *tb)
2653 {
2654 gen_intermediate_code_internal(env, tb, 1);
2655 }
2656
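/*
 * Dump PC, all named special and user registers, the 16 ARs visible
 * in the current window and the complete physical register file.
 */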
2657 void cpu_dump_state(CPUXtensaState *env, FILE *f, fprintf_function cpu_fprintf,
2658 int flags)
2659 {
2660 int i, j;
2661
2662 cpu_fprintf(f, "PC=%08x\n\n", env->pc);
2663
2664 for (i = j = 0; i < 256; ++i) {
2665 if (sregnames[i]) {
2666 cpu_fprintf(f, "%s=%08x%c", sregnames[i], env->sregs[i],
2667 (j++ % 4) == 3 ? '\n' : ' ');
2668 }
2669 }
2670
2671 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
2672
2673 for (i = j = 0; i < 256; ++i) {
2674 if (uregnames[i]) {
2675 cpu_fprintf(f, "%s=%08x%c", uregnames[i], env->uregs[i],
2676 (j++ % 4) == 3 ? '\n' : ' ');
2677 }
2678 }
2679
2680 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
2681
2682 for (i = 0; i < 16; ++i) {
2683 cpu_fprintf(f, "A%02d=%08x%c", i, env->regs[i],
2684 (i % 4) == 3 ? '\n' : ' ');
2685 }
2686
2687 cpu_fprintf(f, "\n");
2688
2689 for (i = 0; i < env->config->nareg; ++i) {
2690 cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i],
2691 (i % 4) == 3 ? '\n' : ' ');
2692 }
2693 }
2694
2695 void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb, int pc_pos)
2696 {
2697 env->pc = gen_opc_pc[pc_pos];
2698 }