/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

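/* Extract the inclusive bit field [start, end] from src, with bit 0 as the
   LSB.  For example, decode() below uses EXTRACT_FIELD(ir, 26, 31) to pull
   the 6-bit major opcode out of an instruction word.  */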
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Sync the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

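/* Chain directly to the next TB only when the destination lies on the same
   guest page as this TB; cross-page jumps exit to the main loop instead so
   that the new page's mapping is looked up (and can fault) afresh.  */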
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

/* Read the carry copy (MSR_CC) out of the MSR sign bit.  */
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c.  carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

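/* mts/mfs and msrset/msrclr.  Special register numbers handled below:
   1 = MSR, 0x3 = EAR, 0x5 = ESR, 0x7 = FSR, 0xb = BTR (mfs only),
   0x800/0x802 = the SLR/SHR stack protection bounds, 0x1000..0x1007 = MMU
   registers (forwarded to the MMU helpers) and 0x2000 + n = PVR[n].  */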
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop???  */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env,
                              offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env,
                              offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env,
                              offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env,
                              offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned mul, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop???  */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
    tcg_temp_free(t0);
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* sra (0x1) and srl (0x41).  */
            LOG_DIS("%s r%d r%d\n", op == 0x41 ? "srl" : "sra",
                    dc->rd, dc->ra);

            /* Update carry.  Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            /* clz.  */
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb.  */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph.  */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit insn pc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

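/* Materialise the translator's direct-branch state into the CPU env as an
   indirect branch (btaken/btarget), so that an exception taken in a delay
   slot still sees the correct branch target.  */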
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
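
/* Worked example: "imm 0x1234" loads env_imm with 0x12340000 and sets
   IMM_FLAG; a following type-B insn such as "addi rD, rA, 0x5678" then has
   dec_alu_op_b() OR in its own low 16 bits, producing the full 32-bit
   operand 0x12345678.  */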

/* Compute the effective address of a load/store into *t, or return a
   pointer straight to a register TCGv when no arithmetic is needed.  The
   caller frees *t only when the returned pointer is t itself.  */
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Set to one if r1 is used by load/stores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment.  */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses.  That's why we speculatively do the load
     * into v.  If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* No support for AXI exclusive so always clear C.  */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment.  */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic.  For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr,
                       cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory.  One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

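/* Conditional branches.  dc->delayed_branch is set to 2 when the branch has
   a delay slot and to 1 otherwise; the main translation loop counts it down
   once per insn and resolves the branch when it reaches zero.  */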
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

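/* do_rti/do_rtb/do_rte restore the pre-exception execution mode: the saved
   copies MSR.UMS and MSR.VMS sit one bit above MSR.UM and MSR.VM, so
   shifting the MSR right by one and masking with (MSR_VM | MSR_UM) moves
   them back into place.  */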
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Non-zero only when the core implements FPU v2; callers bail out on
       zero.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

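    /* Field layout, with LSB = bit 0: opcode = ir[31:26], rd = ir[25:21],
       ra = ir[20:16], and either rb = ir[15:11] (type A) or the 16-bit
       immediate imm = ir[15:0] (type B).  */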
    /* Dispatch to the first decoder whose mask/bits pattern matches the
       opcode.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* Indicate that the hash table must be used
                   to find the next TB.  */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUMBState, debug),
                                   "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUMBState, iflags),
                                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUMBState, imm),
                                 "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUMBState, btarget),
                                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUMBState, btaken),
                                    "btaken");
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUMBState, res_addr),
                                      "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUMBState, res_val),
                                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUMBState, regs[i]),
                                      regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUMBState, sregs[i]),
                                       special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}