/*
 * UniCore32 translation
 *
 * Copyright (C) 2010-2012 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or (at your option) any
 * later version. See the COPYING file in the top-level directory.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    int condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

#define IS_USER(s) 1

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
#define DISAS_SYSCALL 5

static TCGv_ptr cpu_env;
static TCGv_i32 cpu_R[32];

/* FIXME: These should be removed. */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] = {
    "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
    "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };

/* initialize TCG globals. */
void uc32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                offsetof(CPUUniCore32State, regs[i]), regnames[i]);
    }

#define GEN_HELPER 2
#include "helper.h"
}

static int num_temps;

/* Allocate a temporary variable. */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable. */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}
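
/*
 * Note: num_temps is only a debug aid.  It counts outstanding temporaries
 * so that the translation loop below can report an "Internal resource
 * leak" if an instruction forgets to release everything it allocated.
 */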

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUUniCore32State, name))

/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 31) {
        uint32_t addr;
        /* normally, since we have already updated the PC */
        addr = (long)s->pc;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define UCOP_REG_M (((insn) >> 0) & 0x1f)
#define UCOP_REG_N (((insn) >> 19) & 0x1f)
#define UCOP_REG_D (((insn) >> 14) & 0x1f)
#define UCOP_REG_S (((insn) >> 9) & 0x1f)
#define UCOP_REG_LO (((insn) >> 14) & 0x1f)
#define UCOP_REG_HI (((insn) >> 9) & 0x1f)
#define UCOP_SH_OP (((insn) >> 6) & 0x03)
#define UCOP_SH_IM (((insn) >> 9) & 0x1f)
#define UCOP_OPCODES (((insn) >> 25) & 0x0f)
#define UCOP_IMM_9 (((insn) >> 0) & 0x1ff)
#define UCOP_IMM10 (((insn) >> 0) & 0x3ff)
#define UCOP_IMM14 (((insn) >> 0) & 0x3fff)
#define UCOP_COND (((insn) >> 25) & 0x0f)
#define UCOP_CMOV_COND (((insn) >> 19) & 0x0f)
#define UCOP_CPNUM (((insn) >> 10) & 0x0f)
#define UCOP_UCF64_FMT (((insn) >> 24) & 0x03)
#define UCOP_UCF64_FUNC (((insn) >> 6) & 0x0f)
#define UCOP_UCF64_COND (((insn) >> 6) & 0x0f)

#define UCOP_SET(i) ((insn) & (1 << (i)))
#define UCOP_SET_P UCOP_SET(28)
#define UCOP_SET_U UCOP_SET(27)
#define UCOP_SET_B UCOP_SET(26)
#define UCOP_SET_W UCOP_SET(25)
#define UCOP_SET_L UCOP_SET(24)
#define UCOP_SET_S UCOP_SET(24)
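
/*
 * Worked decode example (an illustrative value, not taken from the ISA
 * manual): for insn = 0x09088604 the field macros above extract
 *   UCOP_OPCODES = 0x4  (bits [28:25])
 *   UCOP_SET_S   : set  (bit 24)
 *   UCOP_REG_N   = 1    (bits [23:19])
 *   UCOP_REG_D   = 2    (bits [18:14])
 *   UCOP_SH_IM   = 3    (bits [13:9])
 *   UCOP_SH_OP   = 0    (bits [7:6], LSL)
 *   UCOP_REG_M   = 4    (bits [4:0])
 * i.e. a flag-setting data-processing operation on r1 and r4 (shifted
 * left by 3) into r2.
 */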

#define ILLEGAL cpu_abort(env, \
        "Illegal UniCore32 instruction %x at line %d!", \
        insn, __LINE__)

#ifndef CONFIG_USER_ONLY
static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    TCGv tmp, tmp2, tmp3;
    if ((insn & 0xfe000000) == 0xe0000000) {
        tmp2 = new_tmp();
        tmp3 = new_tmp();
        tcg_gen_movi_i32(tmp2, UCOP_REG_N);
        tcg_gen_movi_i32(tmp3, UCOP_IMM10);
        if (UCOP_SET_L) {
            tmp = new_tmp();
            gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
            dead_tmp(tmp);
        }
        dead_tmp(tmp2);
        dead_tmp(tmp3);
        return;
    }
    ILLEGAL;
}

static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    TCGv tmp;

    if ((insn & 0xff003fff) == 0xe1000400) {
        /*
         * movc rd, pp.nn, #imm9
         *      rd: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 0)
         *      imm9: 0
         */
        if (UCOP_REG_N == 0) {
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
            store_reg(s, UCOP_REG_D, tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    if ((insn & 0xff003fff) == 0xe0000401) {
        /*
         * movc pp.nn, rn, #imm9
         *      rn: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 1)
         *      imm9: 1
         */
        if (UCOP_REG_N == 1) {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp1_putc(tmp);
            dead_tmp(tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    ILLEGAL;
}
#endif

static inline void gen_set_asr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_asr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply. */
/* 32x32->64 multiply.  Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
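
/*
 * The two widening multiplies above differ only in how the 32-bit inputs
 * are extended, e.g. with a = 0xffffffff and b = 2:
 *   gen_mulu_i64_i32 yields 0x00000001fffffffe  (4294967295 * 2)
 *   gen_muls_i64_i32 yields 0xfffffffffffffffe  (-1 * 2)
 */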

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))

/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
}
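
/*
 * Flag storage convention used throughout this file: NF and ZF hold the
 * full result value (N is the sign bit of NF, Z means "ZF == 0"), CF
 * holds 0 or 1, and V is the sign bit of VF.  gen_test_cc() below relies
 * on exactly this representation.
 */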

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(tmp, tmp, 1);
        }
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0. */
static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
        int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, 32 - shift);
            }
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0) {
            shift = 32;
        }
        if (flags) {
            shifter_out_im(var, shift - 1);
        }
        if (shift == 32) {
            shift = 31;
        }
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_rotri_i32(var, var, shift);
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags) {
                shifter_out_im(var, 0);
            }
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
        break;
    }
}
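
/*
 * Note the shift == 0 special cases above: an immediate LSR of 0 encodes
 * a shift by 32 (result 0, CF taken from bit 31), an immediate ASR of 0
 * encodes a shift by 32 (result is bit 31 replicated), and a ROR of 0
 * encodes a rotate-right-extended through the carry flag.
 */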

static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
        TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0:
            gen_helper_shl_cc(var, var, shift);
            break;
        case 1:
            gen_helper_shr_cc(var, var, shift);
            break;
        case 2:
            gen_helper_sar_cc(var, var, shift);
            break;
        case 3:
            gen_helper_ror_cc(var, var, shift);
            break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_helper_shl(var, var, shift);
            break;
        case 1:
            gen_helper_shr(var, var, shift);
            break;
        case 2:
            gen_helper_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    dead_tmp(shift);
}

static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

static const uint8_t table_logic_cc[16] = {
    1, /* and */ 1, /* xor */ 0, /* sub */ 0, /* rsb */
    0, /* add */ 0, /* adc */ 0, /* sbc */ 0, /* rsc */
    1, /* andl */ 1, /* xorl */ 0, /* cmp */ 0, /* cmn */
    1, /* orr */ 1, /* mov */ 1, /* bic */ 1, /* mvn */
};

/* Set PC state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_movi_i32(cpu_R[31], addr & ~3);
}

/* Set PC state from var.  var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[31], var, ~3);
    dead_tmp(var);
}

static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
{
    store_reg(s, reg, var);
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}

static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}

static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[31], val);
}

/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(29)) {
        /* immediate */
        val = UCOP_IMM14;
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* shift/register */
        offset = load_reg(s, UCOP_REG_M);
        gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(26)) {
        /* immediate */
        val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* register */
        offset = load_reg(s, UCOP_REG_M);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}

static inline long ucf64_reg_offset(int reg)
{
    if (reg & 1) {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
            + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
            + offsetof(CPU_DoubleU, l.lower);
    }
}
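
/*
 * Example: the 32-bit UniCore-F64 register view maps pairwise onto the
 * 64-bit ucf64.regs[] array, so ucf64_reg_offset(4) resolves to
 * ucf64.regs[2].l.lower and ucf64_reg_offset(5) to ucf64.regs[2].l.upper.
 */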

#define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg))
#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))

/* UniCore-F64 single load/store I_offset */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    int offset;
    TCGv tmp;
    TCGv addr;

    addr = load_reg(s, UCOP_REG_N);
    if (!UCOP_SET_P && !UCOP_SET_W) {
        ILLEGAL;
    }

    if (UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }

    if (UCOP_SET_L) { /* load */
        tmp = gen_ld32(addr, IS_USER(s));
        ucf64_gen_st32(tmp, UCOP_REG_D);
    } else { /* store */
        tmp = ucf64_gen_ld32(UCOP_REG_D);
        gen_st32(tmp, addr, IS_USER(s));
    }

    if (!UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }
    if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}

/* UniCore-F64 load/store multiple words */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int i;
    int j, n, freg;
    TCGv tmp;
    TCGv addr;

    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }
    if (UCOP_REG_N == 31) {
        ILLEGAL;
    }
    if ((insn << 24) == 0) { /* the register list (bits [7:0]) is empty */
        ILLEGAL;
    }

    addr = load_reg(s, UCOP_REG_N);

    n = 0;
    for (i = 0; i < 8; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }

    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } /* unnecessary to do anything when post increment */
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */

    for (i = 0, j = 0; i < 8; i++, freg++) {
        if (!UCOP_SET(i)) {
            continue;
        }

        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            ucf64_gen_st32(tmp, freg);
        } else { /* store */
            tmp = ucf64_gen_ld32(freg);
            gen_st32(tmp, addr, IS_USER(s));
        }

        j++;
        /* unnecessary to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } /* unnecessary to do anything when pre increment */
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}

/* UniCore-F64 mrc/mcr */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;

    if ((insn & 0xfe0003ff) == 0xe2000000) {
        /* control register */
        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) {
            /* CFF */
            tmp = new_tmp();
            gen_helper_ucf64_get_fpscr(tmp, cpu_env);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* CTF */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_ucf64_set_fpscr(cpu_env, tmp);
            dead_tmp(tmp);
            gen_lookup_tb(s);
        }
        return;
    }
    if ((insn & 0xfe0003ff) == 0xe0000000) {
        /* general register */
        if (UCOP_REG_D == 31) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) { /* MFF */
            tmp = ucf64_gen_ld32(UCOP_REG_N);
            store_reg(s, UCOP_REG_D, tmp);
        } else { /* MTF */
            tmp = load_reg(s, UCOP_REG_D);
            ucf64_gen_st32(tmp, UCOP_REG_N);
        }
        return;
    }
    if ((insn & 0xfb000000) == 0xe9000000) {
        /* MFFC */
        if (UCOP_REG_D != 31) {
            ILLEGAL;
        }
        if (UCOP_UCF64_COND & 0x8) {
            ILLEGAL;
        }

        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
        if (UCOP_SET(26)) {
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
        } else {
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
        }
        dead_tmp(tmp);
        return;
    }
    ILLEGAL;
}

/* UniCore-F64 convert instructions */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    if (UCOP_REG_N != 0) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* cvt.s */
        switch (UCOP_UCF64_FMT) {
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* s */:
            ILLEGAL;
            break;
        }
        break;
    case 1: /* cvt.d */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* d */:
            ILLEGAL;
            break;
        }
        break;
    case 4: /* cvt.w */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* w */:
            ILLEGAL;
            break;
        }
        break;
    default:
        ILLEGAL;
    }
}

/* UniCore-F64 compare instructions */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_SET(25)) {
        ILLEGAL;
    }
    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }

    ILLEGAL; /* TODO */
    if (UCOP_SET(24)) {
        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
    } else {
        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
    }
}

#define gen_helper_ucf64_movs(x, y) do { } while (0)
#define gen_helper_ucf64_movd(x, y) do { } while (0)

#define UCF64_OP1(name) do { \
        if (UCOP_REG_N != 0) { \
            ILLEGAL; \
        } \
        switch (UCOP_UCF64_FMT) { \
        case 0 /* s */: \
            tcg_gen_ld_i32(cpu_F0s, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
            tcg_gen_st_i32(cpu_F0s, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break; \
        case 1 /* d */: \
            tcg_gen_ld_i64(cpu_F0d, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
            tcg_gen_st_i64(cpu_F0d, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break; \
        case 2 /* w */: \
            ILLEGAL; \
            break; \
        } \
    } while (0)

#define UCF64_OP2(name) do { \
        switch (UCOP_UCF64_FMT) { \
        case 0 /* s */: \
            tcg_gen_ld_i32(cpu_F0s, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i32(cpu_F1s, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, \
                           cpu_F0s, cpu_F1s, cpu_env); \
            tcg_gen_st_i32(cpu_F0s, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break; \
        case 1 /* d */: \
            tcg_gen_ld_i64(cpu_F0d, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i64(cpu_F1d, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, \
                           cpu_F0d, cpu_F1d, cpu_env); \
            tcg_gen_st_i64(cpu_F0d, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break; \
        case 2 /* w */: \
            ILLEGAL; \
            break; \
        } \
    } while (0)

/* UniCore-F64 data processing */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* add */
        UCF64_OP2(add);
        break;
    case 1: /* sub */
        UCF64_OP2(sub);
        break;
    case 2: /* mul */
        UCF64_OP2(mul);
        break;
    case 4: /* div */
        UCF64_OP2(div);
        break;
    case 5: /* abs */
        UCF64_OP1(abs);
        break;
    case 6: /* mov */
        UCF64_OP1(mov);
        break;
    case 7: /* neg */
        UCF64_OP1(neg);
        break;
    default:
        ILLEGAL;
    }
}

/* Disassemble an F64 instruction */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (!UCOP_SET(29)) {
        if (UCOP_SET(26)) {
            do_ucf64_ldst_m(env, s, insn);
        } else {
            do_ucf64_ldst_i(env, s, insn);
        }
    } else {
        if (UCOP_SET(5)) {
            switch ((insn >> 26) & 0x3) {
            case 0:
                do_ucf64_datap(env, s, insn);
                break;
            case 1:
                ILLEGAL;
                break;
            case 2:
                do_ucf64_fcvt(env, s, insn);
                break;
            case 3:
                do_ucf64_fcmp(env, s, insn);
                break;
            }
        } else {
            do_ucf64_trans(env, s, insn);
        }
    }
}

static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
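
/*
 * Direct TB chaining is only attempted while the destination stays on
 * the same guest page as the current TB; the (tb + n) token passed to
 * tcg_gen_exit_tb() lets the jump be patched later, while exit_tb(0)
 * forces the next TB to be found through the hash table lookup.
 */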

static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception. */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}

static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x) {
        tcg_gen_sari_i32(t0, t0, 16);
    } else {
        gen_sxth(t0);
    }
    if (y) {
        tcg_gen_sari_i32(t1, t1, 16);
    } else {
        gen_sxth(t1);
    }
    tcg_gen_mul_i32(t0, t0, t1);
}

/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
{
    TCGv tmp;
    if (bsr) {
        /* ??? This is also undefined in system mode. */
        if (IS_USER(s)) {
            return 1;
        }

        tmp = load_cpu_field(bsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, bsr);
    } else {
        gen_set_asr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 31, pc);
    tmp = load_cpu_field(bsr);
    gen_set_asr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}

static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    switch (UCOP_CPNUM) {
#ifndef CONFIG_USER_ONLY
    case 0:
        disas_cp0_insn(env, s, insn);
        break;
    case 1:
        disas_ocd_insn(env, s, insn);
        break;
#endif
    case 2:
        disas_ucf64_insn(env, s, insn);
        break;
    default:
        /* Unknown coprocessor. */
        cpu_abort(env, "Unknown coprocessor!");
    }
}

/* Store a 64-bit value to a register pair.  Clobbers val. */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = new_tmp();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = new_tmp();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}

/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn. */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    dead_tmp(tmpl);
    dead_tmp(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* data processing instructions */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    TCGv tmp2;
    int logic_cc;

    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
        if (UCOP_SET(23)) { /* CMOV instructions */
            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
                ILLEGAL;
            }
            /* If the condition is not "always", generate a conditional
               jump to the next instruction. */
            s->condlabel = gen_new_label();
            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);

    if (UCOP_SET(29)) {
        unsigned int val;
        /* immediate operand */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp2 = new_tmp();
        tcg_gen_movi_i32(tmp2, val);
        if (logic_cc && UCOP_SH_IM) {
            gen_set_CF_bit31(tmp2);
        }
    } else {
        /* register */
        tmp2 = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(5)) {
            tmp = load_reg(s, UCOP_REG_S);
            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
        } else {
            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
        }
    }

    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        tmp = load_reg(s, UCOP_REG_N);
    } else {
        TCGV_UNUSED(tmp);
    }

    switch (UCOP_OPCODES) {
    case 0x00:
        tcg_gen_and_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x01:
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x02:
        if (UCOP_SET_S && UCOP_REG_D == 31) {
            /* SUBS r31, ... is used for exception return. */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_helper_sub_cc(tmp, tmp, tmp2);
            gen_exception_return(s, tmp);
        } else {
            if (UCOP_SET_S) {
                gen_helper_sub_cc(tmp, tmp, tmp2);
            } else {
                tcg_gen_sub_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp);
        }
        break;
    case 0x03:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, tmp2, tmp);
        } else {
            tcg_gen_sub_i32(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x04:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, tmp, tmp2);
        } else {
            tcg_gen_add_i32(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x05:
        if (UCOP_SET_S) {
            gen_helper_adc_cc(tmp, tmp, tmp2);
        } else {
            gen_add_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x06:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, tmp, tmp2);
        } else {
            gen_sub_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x07:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, tmp2, tmp);
        } else {
            gen_sub_carry(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x08:
        if (UCOP_SET_S) {
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x09:
        if (UCOP_SET_S) {
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x0a:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0b:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0c:
        tcg_gen_or_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x0d:
        if (logic_cc && UCOP_REG_D == 31) {
            /* MOVS r31, ... is used for exception return. */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_exception_return(s, tmp2);
        } else {
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp2);
        }
        break;
    case 0x0e:
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    default:
    case 0x0f:
        tcg_gen_not_i32(tmp2, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp2);
        break;
    }
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        dead_tmp(tmp2);
    }
}

/* multiply */
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (UCOP_SET(27)) {
        /* 64 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        if (UCOP_SET(26)) {
            tmp64 = gen_muls_i64_i32(tmp, tmp2);
        } else {
            tmp64 = gen_mulu_i64_i32(tmp, tmp2);
        }
        if (UCOP_SET(25)) { /* mult accumulate */
            gen_addq(s, tmp64, UCOP_REG_LO, UCOP_REG_HI);
        }
        gen_storeq_reg(s, UCOP_REG_LO, UCOP_REG_HI, tmp64);
        tcg_temp_free_i64(tmp64);
    } else {
        /* 32 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        tcg_gen_mul_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        if (UCOP_SET(25)) {
            /* Add */
            tmp2 = load_reg(s, UCOP_REG_S);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
        }
        if (UCOP_SET_S) {
            gen_logic_CC(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
    }
}

/* miscellaneous instructions */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val;
    TCGv tmp;

    if ((insn & 0xffffffe0) == 0x10ffc120) {
        /* Trivial implementation equivalent to bx. */
        tmp = load_reg(s, UCOP_REG_M);
        gen_bx(s, tmp);
        return;
    }

    if ((insn & 0xfbffc000) == 0x30ffc000) {
        /* PSR = immediate */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x12ffc020) {
        /* PSR.flag = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x10ffc020) {
        /* PSR = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbf83fff) == 0x10f80000) {
        /* reg = PSR */
        if (UCOP_SET_B) {
            if (IS_USER(s)) {
                ILLEGAL;
            }
            tmp = load_cpu_field(bsr);
        } else {
            tmp = new_tmp();
            gen_helper_asr_read(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    if ((insn & 0xfbf83fe0) == 0x12f80120) {
        /* clz */
        tmp = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(26)) {
            gen_helper_clo(tmp, tmp);
        } else {
            gen_helper_clz(tmp, tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    /* otherwise */
    ILLEGAL;
}

/* load/store I_offset and R_offset */
static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int i;
    TCGv tmp;
    TCGv tmp2;

    tmp2 = load_reg(s, UCOP_REG_N);
    i = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));

    /* immediate */
    if (UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
    }

    if (UCOP_SET_L) {
        /* load */
        if (UCOP_SET_B) {
            tmp = gen_ld8u(tmp2, i);
        } else {
            tmp = gen_ld32(tmp2, i);
        }
    } else {
        /* store */
        tmp = load_reg(s, UCOP_REG_D);
        if (UCOP_SET_B) {
            gen_st8(tmp, tmp2, i);
        } else {
            gen_st32(tmp, tmp2, i);
        }
    }
    if (!UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
        store_reg(s, UCOP_REG_N, tmp2);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, tmp2);
    } else {
        dead_tmp(tmp2);
    }
    if (UCOP_SET_L) {
        /* Complete the load. */
        if (UCOP_REG_D == 31) {
            gen_bx(s, tmp);
        } else {
            store_reg(s, UCOP_REG_D, tmp);
        }
    }
}

/* SWP instruction */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if ((insn & 0xff003fe0) != 0x40000120) {
        ILLEGAL;
    }

    /* ??? This is not really atomic.  However we know
       we never have multiple CPUs running in parallel,
       so it is good enough. */
    addr = load_reg(s, UCOP_REG_N);
    tmp = load_reg(s, UCOP_REG_M);
    if (UCOP_SET_B) {
        tmp2 = gen_ld8u(addr, IS_USER(s));
        gen_st8(tmp, addr, IS_USER(s));
    } else {
        tmp2 = gen_ld32(addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    }
    dead_tmp(addr);
    store_reg(s, UCOP_REG_D, tmp2);
}

/* load/store hw/sb */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv addr;
    TCGv tmp;

    if (UCOP_SH_OP == 0) {
        do_swap(env, s, insn);
        return;
    }

    addr = load_reg(s, UCOP_REG_N);
    if (UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
    }

    if (UCOP_SET_L) { /* load */
        switch (UCOP_SH_OP) {
        case 1:
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 2:
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        default: /* see do_swap */
        case 3:
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
    } else { /* store */
        if (UCOP_SH_OP != 1) {
            ILLEGAL;
        }
        tmp = load_reg(s, UCOP_REG_D);
        gen_st16(tmp, addr, IS_USER(s));
    }
    /* Perform base writeback before the loaded value to
       ensure correct behavior with overlapping index registers. */
    if (!UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
        store_reg(s, UCOP_REG_N, addr);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (UCOP_SET_L) {
        /* Complete the load. */
        store_reg(s, UCOP_REG_D, tmp);
    }
}

/* load/store multiple words */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val, i;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    if (UCOP_SET(7)) {
        ILLEGAL;
    }
    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        if (UCOP_SET(18) == 0) { /* pc reg */
            user = 1;
        }
    }

    addr = load_reg(s, UCOP_REG_N);

    /* compute total size */
    loaded_base = 0;
    TCGV_UNUSED(loaded_var);
    n = 0;
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* XXX: test invalid n == 0 case ? */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* post increment */
        }
    } else {
        if (UCOP_SET_P) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else {
            /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }
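
    /*
     * Illustrative example: a three-register ascending pre-increment
     * transfer with base 0x1000 touches 0x1004, 0x1008 and 0x100c (the
     * +4 above plus the two inter-transfer increments below); the
     * descending variants pre-bias the base so that the transfers still
     * walk upwards through memory.
     */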

    j = 0;
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (i == 6) {
            /* bits [8:6] are not part of the register list; skip them */
            i = i + 3;
        }
        if (UCOP_SET(i)) {
            if (UCOP_SET_L) { /* load */
                tmp = gen_ld32(addr, IS_USER(s));
                if (reg == 31) {
                    gen_bx(s, tmp);
                } else if (user) {
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_set_user_reg(tmp2, tmp);
                    tcg_temp_free_i32(tmp2);
                    dead_tmp(tmp);
                } else if (reg == UCOP_REG_N) {
                    loaded_var = tmp;
                    loaded_base = 1;
                } else {
                    store_reg(s, reg, tmp);
                }
            } else { /* store */
                if (reg == 31) {
                    /* special case: r31 = PC + 4 */
                    val = (long)s->pc;
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, val);
                } else if (user) {
                    tmp = new_tmp();
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_get_user_reg(tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                } else {
                    tmp = load_reg(s, reg);
                }
                gen_st32(tmp, addr, IS_USER(s));
            }
            j++;
            /* no need to add after the last transfer */
            if (j != n) {
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
    }
    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (UCOP_SET_P) {
                /* pre increment */
            } else {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (loaded_base) {
        store_reg(s, UCOP_REG_N, loaded_var);
    }
    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR. */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        s->is_jmp = DISAS_UPDATE;
    }
}

/* branch (and link) */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* If the condition is not "always", generate a conditional
           jump to the next instruction. */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    val = (int32_t)s->pc;
    if (UCOP_SET_L) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }
    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */
    gen_jmp(s, val);
}
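
/*
 * Concrete (illustrative) example of the branch target computation above:
 * the low 24 bits are sign-extended by the ((insn << 8) >> 8) idiom and
 * scaled by 4, relative to s->pc, which already points 4 bytes past the
 * branch.  An offset field of 0xfffffe is therefore -2, giving
 * s->pc + (-2 << 2), i.e. 4 bytes before the branch itself.
 */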

static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    unsigned int insn;

    insn = ldl_code(s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     * C    : S OR L
     * D    : 8
     * E    : 5
     */
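    /*
     * For the worked example insn = 0x09088604 used earlier, insn >> 29
     * is 0x0, so decoding proceeds through the data-processing path below.
     */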
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }

        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
        /* fall through */
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;

    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
        /* fall through */
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;

    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor. */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
    }

    return;
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUUniCore32State *env,
        TranslationBlock *tb, int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2; /* FIXME */
                    goto done_generating;
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    gen_opc_instr_start[lj++] = 0;
                }
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_uc32_insn(env, dc);

        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place. */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of a TB, there are
           a few other things that can cause us to terminate the TB in
           the middle:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j) {
            gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUUniCore32State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};

#define UCF64_DUMP_STATE
void cpu_dump_state(CPUUniCore32State *env, FILE *f, fprintf_function cpu_fprintf,
        int flags)
{
    int i;
#ifdef UCF64_DUMP_STATE
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps. */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    psr = cpu_asr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                cpu_mode_names[psr & 0xf]);

#ifdef UCF64_DUMP_STATE
    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%" PRIx64 "(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (uint64_t)d0.f64, d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
#endif
}

void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[31] = gen_opc_pc[pc_pos];
}