/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "helper.h"
#include "microblaze-decode.h"

#define GEN_HELPER 1
#include "helper.h"

#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
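
/* For illustration: EXTRACT_FIELD(ir, 26, 31) shifts the word right by 26
   and masks with (1 << 6) - 1, i.e. it yields the top six bits of ir,
   which is how the opcode field is pulled out in decode() below.  */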

static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

/* Sign extend at translation time.  */
static inline int sign_extend(unsigned int val, unsigned int width)
{
    int sval;

    /* LSL.  */
    val <<= 31 - width;
    sval = val;
    /* ASR.  */
    sval >>= 31 - width;
    return sval;
}
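
/* For illustration: with width 15 (a 16-bit field), sign_extend(0x8000, 15)
   shifts the value up to bit 31 and arithmetic-shifts it back down, giving
   0xffff8000, while sign_extend(0x7fff, 15) stays 0x00007fff.  */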

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
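
/* Note (illustrative): direct TB chaining is only safe when the destination
   lies on the same guest page as this TB, since the mapping could otherwise
   change under us; that is what the TARGET_PAGE_MASK comparison above
   checks.  Cross-page jumps fall back to exit_tb(0), returning to the main
   loop to look the next TB up.  */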

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
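
/* For illustration: the shli/sari pair replicates bit 0 of v across the
   whole word (0 -> 0x00000000, 1 -> 0xffffffff), so the following andi
   leaves either both carry bits (MSR_C | MSR_CC) set or both clear before
   they are merged into MSR.  read_carry() above is the inverse: it reads
   the carry copy from bit 31.  */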

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
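
/* For illustration: type A instructions take their second operand from
   register rb, type B from a 16-bit immediate.  Without a preceding imm
   prefix the immediate is sign-extended; with one, env_imm already holds
   the upper 16 bits loaded by dec_imm() and the lower half is OR'd in,
   forming a full 32-bit immediate.  */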

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
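
/* For illustration: rsub computes b - a via the two's complement identity
   b - a = b + ~a + 1, which is why the default carry-in above is 1 rather
   than 0.  With the c flag set, the incoming MSR carry takes the place of
   that +1, giving the borrow-chained rsubc behaviour.  */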

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    int l1;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                l1 = gen_new_label();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_EQ,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            l1 = gen_new_label();
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_NE,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    /* OR in the masked value t, not the raw v; otherwise the masking above
       is dead code and a guest could set the read-only PVR bit.  */
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned mul, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
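
/* For illustration: both helpers widen the 32-bit operands to 64 bits
   (sign- or zero-extending respectively), multiply once, then split the
   product: the low word goes to d and, after a 32-bit right shift, the
   high word to d2.  E.g. unsigned 0xffffffff * 2 = 0x1fffffffe, so
   d = 0xfffffffe and d2 = 0x1.  */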

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
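
/* For illustration: the returned pointer is either a direct pointer into
   cpu_R[] (when one operand is r0 or the immediate is zero, avoiding a
   copy) or a pointer to the caller-provided temp *t holding ra + rb or
   ra + imm; callers test "addr == &t" to know whether a temp must be
   freed.  Accesses through r1 additionally trigger the stack-protection
   helper.  */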

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
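                /* Equivalently: addr' = (addr & ~3) | (3 - (addr & 3)),
                   which is what the andi/sub/or sequence below computes;
                   e.g. byte address 0 maps to 3 within its word.  */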
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* No support for AXI exclusive so always clear C.  */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    int swx_skip = 0;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
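                /* Same closed form as in dec_load():
                   addr' = (addr & ~3) | (3 - (addr & 3)).  */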
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
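
/* For illustration (bit layout per this target's cpu.h): the save copies
   MSR_UMS/MSR_VMS sit one bit above MSR_UM/MSR_VM, so the shri-by-1 in the
   three helpers above moves the saved mode bits into the live positions
   before they are masked and merged back in; rti/rtb/rte differ only in
   which of IE, BIP and EE/EIP they additionally touch.  */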

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    int r;

    r = dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU2_MASK;

    if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return r;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);
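
    /* For illustration, the fields above correspond to the two MicroBlaze
       encodings (bit positions given little-endian, as EXTRACT_FIELD uses
       them):
         type A: opcode[31:26] rd[25:21] ra[20:16] rb[15:11] func[10:0]
         type B: opcode[31:26] rd[25:21] ra[20:16] imm16[15:0]
       Note that rb and imm overlap; dec_alu_op_b() picks the right one
       based on type_b.  */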

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

static void check_breakpoint(CPUMBState *env, DisasContext *dc)
{
    CPUState *cs = CPU(mb_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
static inline void
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUMBState *env = &cpu->env;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    do
    {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    int l1;

                    t_sync_flags(dc);
                    l1 = gen_new_label();
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && tcg_ctx.gen_opc_ptr < gen_opc_end
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
                 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
                 tcg_ctx.gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUMBState, debug),
                                   "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUMBState, iflags),
                                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUMBState, imm),
                                 "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUMBState, btarget),
                                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUMBState, btaken),
                                    "btaken");
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUMBState, res_addr),
                                      "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUMBState, res_val),
                                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUMBState, regs[i]),
                                      regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUMBState, sregs[i]),
                                       special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
{
    env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
}