1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "tcg-op.h"
24 #include "helper.h"
25 #include "microblaze-decode.h"
26
27 #define GEN_HELPER 1
28 #include "helper.h"
29
30 #define SIM_COMPAT 0
31 #define DISAS_GNU 1
32 #define DISAS_MB 1
33 #if DISAS_MB && !SIM_COMPAT
34 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
35 #else
36 # define LOG_DIS(...) do { } while (0)
37 #endif
38
39 #define D(x)
40
41 #define EXTRACT_FIELD(src, start, end) \
42 (((src) >> start) & ((1 << (end - start + 1)) - 1))
43
44 static TCGv env_debug;
45 static TCGv_ptr cpu_env;
46 static TCGv cpu_R[32];
47 static TCGv cpu_SR[18];
48 static TCGv env_imm;
49 static TCGv env_btaken;
50 static TCGv env_btarget;
51 static TCGv env_iflags;
52 static TCGv env_res_addr;
53 static TCGv env_res_val;
54
55 #include "exec/gen-icount.h"
56
57 /* This is the state at translation time. */
58 typedef struct DisasContext {
59 CPUMBState *env;
60 target_ulong pc;
61
62 /* Decoder. */
63 int type_b;
64 uint32_t ir;
65 uint8_t opcode;
66 uint8_t rd, ra, rb;
67 uint16_t imm;
68
69 unsigned int cpustate_changed;
70 unsigned int delayed_branch;
71 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
72 unsigned int clear_imm;
73 int is_jmp;
74
75 #define JMP_NOJMP 0
76 #define JMP_DIRECT 1
77 #define JMP_DIRECT_CC 2
78 #define JMP_INDIRECT 3
79 unsigned int jmp;
80 uint32_t jmp_pc;
81
82 int abort_at_next_insn;
83 int nr_nops;
84 struct TranslationBlock *tb;
85 int singlestep_enabled;
86 } DisasContext;
87
88 static const char *regnames[] =
89 {
90 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
91 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
92 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
93 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
94 };
95
96 static const char *special_regnames[] =
97 {
98 "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
99 "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
100 "sr16", "sr17", "sr18"
101 };
102
103 /* Sign extend at translation time. */
104 static inline int sign_extend(unsigned int val, unsigned int width)
105 {
106 int sval;
107
108 /* LSL. */
109 val <<= 31 - width;
110 sval = val;
111 /* ASR. */
112 sval >>= 31 - width;
113 return sval;
114 }
115
116 static inline void t_sync_flags(DisasContext *dc)
117 {
118     /* Sync the TB-dependent flags between the translator and the runtime.  */
119 if (dc->tb_flags != dc->synced_flags) {
120 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
121 dc->synced_flags = dc->tb_flags;
122 }
123 }
124
125 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
126 {
127 TCGv_i32 tmp = tcg_const_i32(index);
128
129 t_sync_flags(dc);
130 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
131 gen_helper_raise_exception(cpu_env, tmp);
132 tcg_temp_free_i32(tmp);
133 dc->is_jmp = DISAS_UPDATE;
134 }
135
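/* Emit a jump to dest: chain directly to the next TB when dest lies on the
   same page as this TB, otherwise just update PC and exit to the main loop. */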
136 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
137 {
138 TranslationBlock *tb;
139 tb = dc->tb;
140 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
141 tcg_gen_goto_tb(n);
142 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
143 tcg_gen_exit_tb((uintptr_t)tb + n);
144 } else {
145 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
146 tcg_gen_exit_tb(0);
147 }
148 }
149
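/* read_carry extracts the carry flag (kept in MSR bit 31) into d. */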
150 static void read_carry(DisasContext *dc, TCGv d)
151 {
152 tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
153 }
154
155 /*
156 * write_carry sets the carry bits in MSR based on bit 0 of v.
157 * v[31:1] are ignored.
158 */
159 static void write_carry(DisasContext *dc, TCGv v)
160 {
161 TCGv t0 = tcg_temp_new();
162 tcg_gen_shli_tl(t0, v, 31);
163 tcg_gen_sari_tl(t0, t0, 31);
164 tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
165 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
166 ~(MSR_C | MSR_CC));
167 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
168 tcg_temp_free(t0);
169 }
170
171 static void write_carryi(DisasContext *dc, bool carry)
172 {
173 TCGv t0 = tcg_temp_new();
174 tcg_gen_movi_tl(t0, carry);
175 write_carry(dc, t0);
176 tcg_temp_free(t0);
177 }
178
179 /* True if ALU operand b is a small immediate that may deserve
180 faster treatment. */
181 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
182 {
183     /* Immediate insn without the imm prefix? */
184 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
185 }
186
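/* Return a pointer to the second ALU operand: for type B insns this is the
   immediate (sign-extended, or combined with a preceding imm prefix) staged
   in env_imm, otherwise it is register rb. */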
187 static inline TCGv *dec_alu_op_b(DisasContext *dc)
188 {
189 if (dc->type_b) {
190 if (dc->tb_flags & IMM_FLAG)
191 tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
192 else
193 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
194 return &env_imm;
195 } else
196 return &cpu_R[dc->rb];
197 }
198
199 static void dec_add(DisasContext *dc)
200 {
201 unsigned int k, c;
202 TCGv cf;
203
204 k = dc->opcode & 4;
205 c = dc->opcode & 2;
206
207 LOG_DIS("add%s%s%s r%d r%d r%d\n",
208 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
209 dc->rd, dc->ra, dc->rb);
210
211 /* Take care of the easy cases first. */
212 if (k) {
213 /* k - keep carry, no need to update MSR. */
214 /* If rd == r0, it's a nop. */
215 if (dc->rd) {
216 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
217
218 if (c) {
219 /* c - Add carry into the result. */
220 cf = tcg_temp_new();
221
222 read_carry(dc, cf);
223 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
224 tcg_temp_free(cf);
225 }
226 }
227 return;
228 }
229
230 /* From now on, we can assume k is zero. So we need to update MSR. */
231 /* Extract carry. */
232 cf = tcg_temp_new();
233 if (c) {
234 read_carry(dc, cf);
235 } else {
236 tcg_gen_movi_tl(cf, 0);
237 }
238
239 if (dc->rd) {
240 TCGv ncf = tcg_temp_new();
241 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
242 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
243 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
244 write_carry(dc, ncf);
245 tcg_temp_free(ncf);
246 } else {
247 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
248 write_carry(dc, cf);
249 }
250 tcg_temp_free(cf);
251 }
252
253 static void dec_sub(DisasContext *dc)
254 {
255 unsigned int u, cmp, k, c;
256 TCGv cf, na;
257
258 u = dc->imm & 2;
259 k = dc->opcode & 4;
260 c = dc->opcode & 2;
261 cmp = (dc->imm & 1) && (!dc->type_b) && k;
262
263 if (cmp) {
264 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
265 if (dc->rd) {
266 if (u)
267 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
268 else
269 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
270 }
271 return;
272 }
273
274 LOG_DIS("sub%s%s r%d, r%d r%d\n",
275 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
276
277 /* Take care of the easy cases first. */
278 if (k) {
279 /* k - keep carry, no need to update MSR. */
280 /* If rd == r0, it's a nop. */
281 if (dc->rd) {
282 tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
283
284 if (c) {
285 /* c - Add carry into the result. */
286 cf = tcg_temp_new();
287
288 read_carry(dc, cf);
289 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
290 tcg_temp_free(cf);
291 }
292 }
293 return;
294 }
295
296 /* From now on, we can assume k is zero. So we need to update MSR. */
297 /* Extract carry. And complement a into na. */
298 cf = tcg_temp_new();
299 na = tcg_temp_new();
300 if (c) {
301 read_carry(dc, cf);
302 } else {
303 tcg_gen_movi_tl(cf, 1);
304 }
305
306 /* d = b + ~a + c. carry defaults to 1. */
307 tcg_gen_not_tl(na, cpu_R[dc->ra]);
308
309 if (dc->rd) {
310 TCGv ncf = tcg_temp_new();
311 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
312 tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
313 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
314 write_carry(dc, ncf);
315 tcg_temp_free(ncf);
316 } else {
317 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
318 write_carry(dc, cf);
319 }
320 tcg_temp_free(cf);
321 tcg_temp_free(na);
322 }
323
324 static void dec_pattern(DisasContext *dc)
325 {
326 unsigned int mode;
327 int l1;
328
329 if ((dc->tb_flags & MSR_EE_FLAG)
330 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
331 && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
332 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
333 t_gen_raise_exception(dc, EXCP_HW_EXCP);
334 }
335
336 mode = dc->opcode & 3;
337 switch (mode) {
338 case 0:
339 /* pcmpbf. */
340 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
341 if (dc->rd)
342 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
343 break;
344 case 2:
345 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
346 if (dc->rd) {
347 TCGv t0 = tcg_temp_local_new();
348 l1 = gen_new_label();
349 tcg_gen_movi_tl(t0, 1);
350 tcg_gen_brcond_tl(TCG_COND_EQ,
351 cpu_R[dc->ra], cpu_R[dc->rb], l1);
352 tcg_gen_movi_tl(t0, 0);
353 gen_set_label(l1);
354 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
355 tcg_temp_free(t0);
356 }
357 break;
358 case 3:
359 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
360 l1 = gen_new_label();
361 if (dc->rd) {
362 TCGv t0 = tcg_temp_local_new();
363 tcg_gen_movi_tl(t0, 1);
364 tcg_gen_brcond_tl(TCG_COND_NE,
365 cpu_R[dc->ra], cpu_R[dc->rb], l1);
366 tcg_gen_movi_tl(t0, 0);
367 gen_set_label(l1);
368 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
369 tcg_temp_free(t0);
370 }
371 break;
372 default:
373 cpu_abort(dc->env,
374 "unsupported pattern insn opcode=%x\n", dc->opcode);
375 break;
376 }
377 }
378
379 static void dec_and(DisasContext *dc)
380 {
381 unsigned int not;
382
383 if (!dc->type_b && (dc->imm & (1 << 10))) {
384 dec_pattern(dc);
385 return;
386 }
387
388 not = dc->opcode & (1 << 1);
389 LOG_DIS("and%s\n", not ? "n" : "");
390
391 if (!dc->rd)
392 return;
393
394 if (not) {
395 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
396 } else
397 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
398 }
399
400 static void dec_or(DisasContext *dc)
401 {
402 if (!dc->type_b && (dc->imm & (1 << 10))) {
403 dec_pattern(dc);
404 return;
405 }
406
407 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
408 if (dc->rd)
409 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
410 }
411
412 static void dec_xor(DisasContext *dc)
413 {
414 if (!dc->type_b && (dc->imm & (1 << 10))) {
415 dec_pattern(dc);
416 return;
417 }
418
419 LOG_DIS("xor r%d\n", dc->rd);
420 if (dc->rd)
421 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
422 }
423
424 static inline void msr_read(DisasContext *dc, TCGv d)
425 {
426 tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
427 }
428
429 static inline void msr_write(DisasContext *dc, TCGv v)
430 {
431 TCGv t;
432
433 t = tcg_temp_new();
434 dc->cpustate_changed = 1;
435 /* PVR bit is not writable. */
436 tcg_gen_andi_tl(t, v, ~MSR_PVR);
437 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
438     tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
439 tcg_temp_free(t);
440 }
441
442 static void dec_msr(DisasContext *dc)
443 {
444 TCGv t0, t1;
445 unsigned int sr, to, rn;
446 int mem_index = cpu_mmu_index(dc->env);
447
448 sr = dc->imm & ((1 << 14) - 1);
449 to = dc->imm & (1 << 14);
450 dc->type_b = 1;
451 if (to)
452 dc->cpustate_changed = 1;
453
454 /* msrclr and msrset. */
455 if (!(dc->imm & (1 << 15))) {
456 unsigned int clr = dc->ir & (1 << 16);
457
458 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
459 dc->rd, dc->imm);
460
461 if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
462 /* nop??? */
463 return;
464 }
465
466 if ((dc->tb_flags & MSR_EE_FLAG)
467 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
468 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
469 t_gen_raise_exception(dc, EXCP_HW_EXCP);
470 return;
471 }
472
473 if (dc->rd)
474 msr_read(dc, cpu_R[dc->rd]);
475
476 t0 = tcg_temp_new();
477 t1 = tcg_temp_new();
478 msr_read(dc, t0);
479 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
480
481 if (clr) {
482 tcg_gen_not_tl(t1, t1);
483 tcg_gen_and_tl(t0, t0, t1);
484 } else
485 tcg_gen_or_tl(t0, t0, t1);
486 msr_write(dc, t0);
487 tcg_temp_free(t0);
488 tcg_temp_free(t1);
489 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
490 dc->is_jmp = DISAS_UPDATE;
491 return;
492 }
493
494 if (to) {
495 if ((dc->tb_flags & MSR_EE_FLAG)
496 && mem_index == MMU_USER_IDX) {
497 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
498 t_gen_raise_exception(dc, EXCP_HW_EXCP);
499 return;
500 }
501 }
502
503 #if !defined(CONFIG_USER_ONLY)
504 /* Catch read/writes to the mmu block. */
505 if ((sr & ~0xff) == 0x1000) {
506 sr &= 7;
507 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
508 if (to)
509 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
510 else
511 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
512 return;
513 }
514 #endif
515
516 if (to) {
517 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
518 switch (sr) {
519 case 0:
520 break;
521 case 1:
522 msr_write(dc, cpu_R[dc->ra]);
523 break;
524 case 0x3:
525 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
526 break;
527 case 0x5:
528 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
529 break;
530 case 0x7:
531 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
532 break;
533 case 0x800:
534 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
535 break;
536 case 0x802:
537 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
538 break;
539 default:
540 cpu_abort(dc->env, "unknown mts reg %x\n", sr);
541 break;
542 }
543 } else {
544 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
545
546 switch (sr) {
547 case 0:
548 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
549 break;
550 case 1:
551 msr_read(dc, cpu_R[dc->rd]);
552 break;
553 case 0x3:
554 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
555 break;
556 case 0x5:
557 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
558 break;
559 case 0x7:
560 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
561 break;
562 case 0xb:
563 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
564 break;
565 case 0x800:
566 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
567 break;
568 case 0x802:
569 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
570 break;
571 case 0x2000:
572 case 0x2001:
573 case 0x2002:
574 case 0x2003:
575 case 0x2004:
576 case 0x2005:
577 case 0x2006:
578 case 0x2007:
579 case 0x2008:
580 case 0x2009:
581 case 0x200a:
582 case 0x200b:
583 case 0x200c:
584 rn = sr & 0xf;
585 tcg_gen_ld_tl(cpu_R[dc->rd],
586 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
587 break;
588 default:
589 cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
590 break;
591 }
592 }
593
594 if (dc->rd == 0) {
595 tcg_gen_movi_tl(cpu_R[0], 0);
596 }
597 }
598
599 /* 64-bit signed mul, lower result in d and upper in d2. */
600 static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
601 {
602 TCGv_i64 t0, t1;
603
604 t0 = tcg_temp_new_i64();
605 t1 = tcg_temp_new_i64();
606
607 tcg_gen_ext_i32_i64(t0, a);
608 tcg_gen_ext_i32_i64(t1, b);
609 tcg_gen_mul_i64(t0, t0, t1);
610
611 tcg_gen_trunc_i64_i32(d, t0);
612 tcg_gen_shri_i64(t0, t0, 32);
613 tcg_gen_trunc_i64_i32(d2, t0);
614
615 tcg_temp_free_i64(t0);
616 tcg_temp_free_i64(t1);
617 }
618
619 /* 64-bit unsigned mul, lower result in d and upper in d2. */
620 static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
621 {
622 TCGv_i64 t0, t1;
623
624 t0 = tcg_temp_new_i64();
625 t1 = tcg_temp_new_i64();
626
627 tcg_gen_extu_i32_i64(t0, a);
628 tcg_gen_extu_i32_i64(t1, b);
629 tcg_gen_mul_i64(t0, t0, t1);
630
631 tcg_gen_trunc_i64_i32(d, t0);
632 tcg_gen_shri_i64(t0, t0, 32);
633 tcg_gen_trunc_i64_i32(d2, t0);
634
635 tcg_temp_free_i64(t0);
636 tcg_temp_free_i64(t1);
637 }
638
639 /* Multiplier unit. */
640 static void dec_mul(DisasContext *dc)
641 {
642 TCGv d[2];
643 unsigned int subcode;
644
645 if ((dc->tb_flags & MSR_EE_FLAG)
646 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
647 && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
648 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
649 t_gen_raise_exception(dc, EXCP_HW_EXCP);
650 return;
651 }
652
653 subcode = dc->imm & 3;
654 d[0] = tcg_temp_new();
655 d[1] = tcg_temp_new();
656
657 if (dc->type_b) {
658 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
659 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
660 goto done;
661 }
662
663 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
664 if (subcode >= 1 && subcode <= 3
665 && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
666 /* nop??? */
667 }
668
669 switch (subcode) {
670 case 0:
671 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
672 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
673 break;
674 case 1:
675 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
676 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
677 break;
678 case 2:
679 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
680 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
681 break;
682 case 3:
683 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
684 t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
685 break;
686 default:
687 cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
688 break;
689 }
690 done:
691 tcg_temp_free(d[0]);
692 tcg_temp_free(d[1]);
693 }
694
695 /* Div unit. */
696 static void dec_div(DisasContext *dc)
697 {
698 unsigned int u;
699
700 u = dc->imm & 2;
701 LOG_DIS("div\n");
702
703 if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
704 && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
705 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
706 t_gen_raise_exception(dc, EXCP_HW_EXCP);
707 }
708
709 if (u)
710 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
711 cpu_R[dc->ra]);
712 else
713 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
714 cpu_R[dc->ra]);
715 if (!dc->rd)
716 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
717 }
718
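/* Barrel shifter unit: bsll/bsrl/bsra and their immediate forms, with the
   shift amount masked to 5 bits. */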
719 static void dec_barrel(DisasContext *dc)
720 {
721 TCGv t0;
722 unsigned int s, t;
723
724 if ((dc->tb_flags & MSR_EE_FLAG)
725 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
726 && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
727 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
728 t_gen_raise_exception(dc, EXCP_HW_EXCP);
729 return;
730 }
731
732 s = dc->imm & (1 << 10);
733 t = dc->imm & (1 << 9);
734
735 LOG_DIS("bs%s%s r%d r%d r%d\n",
736 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
737
738 t0 = tcg_temp_new();
739
740 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
741 tcg_gen_andi_tl(t0, t0, 31);
742
743 if (s)
744 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
745 else {
746 if (t)
747 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
748 else
749 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
750 }
751 }
752
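/* Bit-manipulation group: src, srl/sra, the sign extensions, the wdc/wic
   cache ops, clz and the byte/halfword swaps. */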
753 static void dec_bit(DisasContext *dc)
754 {
755 TCGv t0;
756 unsigned int op;
757 int mem_index = cpu_mmu_index(dc->env);
758
759 op = dc->ir & ((1 << 9) - 1);
760 switch (op) {
761 case 0x21:
762 /* src. */
763 t0 = tcg_temp_new();
764
765 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
766 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
767 write_carry(dc, cpu_R[dc->ra]);
768 if (dc->rd) {
769 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
770 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
771 }
772 tcg_temp_free(t0);
773 break;
774
775 case 0x1:
776 case 0x41:
777             /* sra (op 0x1) / srl (op 0x41). */
778 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
779
780 /* Update carry. Note that write carry only looks at the LSB. */
781 write_carry(dc, cpu_R[dc->ra]);
782 if (dc->rd) {
783 if (op == 0x41)
784 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
785 else
786 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
787 }
788 break;
789 case 0x60:
790 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
791 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
792 break;
793 case 0x61:
794 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
795 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
796 break;
797 case 0x64:
798 case 0x66:
799 case 0x74:
800 case 0x76:
801 /* wdc. */
802 LOG_DIS("wdc r%d\n", dc->ra);
803 if ((dc->tb_flags & MSR_EE_FLAG)
804 && mem_index == MMU_USER_IDX) {
805 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
806 t_gen_raise_exception(dc, EXCP_HW_EXCP);
807 return;
808 }
809 break;
810 case 0x68:
811 /* wic. */
812 LOG_DIS("wic r%d\n", dc->ra);
813 if ((dc->tb_flags & MSR_EE_FLAG)
814 && mem_index == MMU_USER_IDX) {
815 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
816 t_gen_raise_exception(dc, EXCP_HW_EXCP);
817 return;
818 }
819 break;
820 case 0xe0:
821 if ((dc->tb_flags & MSR_EE_FLAG)
822 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
823 && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
824 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
825 t_gen_raise_exception(dc, EXCP_HW_EXCP);
826 }
827 if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
828 gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
829 }
830 break;
831 case 0x1e0:
832 /* swapb */
833 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
834 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
835 break;
836 case 0x1e2:
837             /* swaph */
838 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
839 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
840 break;
841 default:
842 cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
843 dc->pc, op, dc->rd, dc->ra, dc->rb);
844 break;
845 }
846 }
847
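/* Materialize a pending direct branch into env_btaken/env_btarget so that a
   fault taken in the delay slot sees a consistent branch state. */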
848 static inline void sync_jmpstate(DisasContext *dc)
849 {
850 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
851 if (dc->jmp == JMP_DIRECT) {
852 tcg_gen_movi_tl(env_btaken, 1);
853 }
854 dc->jmp = JMP_INDIRECT;
855 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
856 }
857 }
858
859 static void dec_imm(DisasContext *dc)
860 {
861 LOG_DIS("imm %x\n", dc->imm << 16);
862 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
863 dc->tb_flags |= IMM_FLAG;
864 dc->clear_imm = 0;
865 }
866
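/* Emit a load of the given size; for exclusive accesses (lwx) also record
   the reservation address and the loaded value. */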
867 static inline void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
868 unsigned int size, bool exclusive)
869 {
870 int mem_index = cpu_mmu_index(dc->env);
871
872 if (size == 1) {
873 tcg_gen_qemu_ld8u(dst, addr, mem_index);
874 } else if (size == 2) {
875 tcg_gen_qemu_ld16u(dst, addr, mem_index);
876 } else if (size == 4) {
877 tcg_gen_qemu_ld32u(dst, addr, mem_index);
878 } else
879 cpu_abort(dc->env, "Incorrect load size %d\n", size);
880
881 if (exclusive) {
882 tcg_gen_mov_tl(env_res_addr, addr);
883 tcg_gen_mov_tl(env_res_val, dst);
884 }
885 }
886
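/* Compute the effective address of a load/store.  Returns either a pointer
   straight to a register (when the other operand is r0 or a zero immediate)
   or a pointer to the caller-provided temporary *t.  Accesses based on r1
   additionally go through the stack protection helper. */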
887 static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
888 {
889 unsigned int extimm = dc->tb_flags & IMM_FLAG;
890     /* Should be set to one if r1 is used by load/stores.  */
891 int stackprot = 0;
892
893 /* All load/stores use ra. */
894 if (dc->ra == 1) {
895 stackprot = 1;
896 }
897
898 /* Treat the common cases first. */
899 if (!dc->type_b) {
900 /* If any of the regs is r0, return a ptr to the other. */
901 if (dc->ra == 0) {
902 return &cpu_R[dc->rb];
903 } else if (dc->rb == 0) {
904 return &cpu_R[dc->ra];
905 }
906
907 if (dc->rb == 1) {
908 stackprot = 1;
909 }
910
911 *t = tcg_temp_new();
912 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
913
914 if (stackprot) {
915 gen_helper_stackprot(cpu_env, *t);
916 }
917 return t;
918 }
919 /* Immediate. */
920 if (!extimm) {
921 if (dc->imm == 0) {
922 return &cpu_R[dc->ra];
923 }
924 *t = tcg_temp_new();
925 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
926 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
927 } else {
928 *t = tcg_temp_new();
929 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
930 }
931
932 if (stackprot) {
933 gen_helper_stackprot(cpu_env, *t);
934 }
935 return t;
936 }
937
938 static inline void dec_byteswap(DisasContext *dc, TCGv dst, TCGv src, int size)
939 {
940 if (size == 4) {
941 tcg_gen_bswap32_tl(dst, src);
942 } else if (size == 2) {
943 TCGv t = tcg_temp_new();
944
945 /* bswap16 assumes the high bits are zero. */
946 tcg_gen_andi_tl(t, src, 0xffff);
947 tcg_gen_bswap16_tl(dst, t);
948 tcg_temp_free(t);
949 } else {
950 /* Ignore.
951 cpu_abort(dc->env, "Invalid ldst byteswap size %d\n", size);
952 */
953 }
954 }
955
956 static void dec_load(DisasContext *dc)
957 {
958 TCGv t, *addr;
959 unsigned int size, rev = 0, ex = 0;
960
961 size = 1 << (dc->opcode & 3);
962
963 if (!dc->type_b) {
964 rev = (dc->ir >> 9) & 1;
965 ex = (dc->ir >> 10) & 1;
966 }
967
968 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
969 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
970 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
971 t_gen_raise_exception(dc, EXCP_HW_EXCP);
972 return;
973 }
974
975 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
976 ex ? "x" : "");
977
978 t_sync_flags(dc);
979 addr = compute_ldst_addr(dc, &t);
980
981 /*
982 * When doing reverse accesses we need to do two things.
983 *
984 * 1. Reverse the address wrt endianness.
985 * 2. Byteswap the data lanes on the way back into the CPU core.
986 */
987 if (rev && size != 4) {
988 /* Endian reverse the address. t is addr. */
989 switch (size) {
990 case 1:
991 {
992 /* 00 -> 11
993 01 -> 10
994                    10 -> 01
995 11 -> 00 */
996 TCGv low = tcg_temp_new();
997
998 /* Force addr into the temp. */
999 if (addr != &t) {
1000 t = tcg_temp_new();
1001 tcg_gen_mov_tl(t, *addr);
1002 addr = &t;
1003 }
1004
1005 tcg_gen_andi_tl(low, t, 3);
1006 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1007 tcg_gen_andi_tl(t, t, ~3);
1008 tcg_gen_or_tl(t, t, low);
1009 tcg_gen_mov_tl(env_imm, t);
1010 tcg_temp_free(low);
1011 break;
1012 }
1013
1014 case 2:
1015 /* 00 -> 10
1016 10 -> 00. */
1017 /* Force addr into the temp. */
1018 if (addr != &t) {
1019 t = tcg_temp_new();
1020 tcg_gen_xori_tl(t, *addr, 2);
1021 addr = &t;
1022 } else {
1023 tcg_gen_xori_tl(t, t, 2);
1024 }
1025 break;
1026 default:
1027 cpu_abort(dc->env, "Invalid reverse size\n");
1028 break;
1029 }
1030 }
1031
1032 /* lwx does not throw unaligned access errors, so force alignment */
1033 if (ex) {
1034 /* Force addr into the temp. */
1035 if (addr != &t) {
1036 t = tcg_temp_new();
1037 tcg_gen_mov_tl(t, *addr);
1038 addr = &t;
1039 }
1040 tcg_gen_andi_tl(t, t, ~3);
1041 }
1042
1043 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1044 sync_jmpstate(dc);
1045
1046 /* Verify alignment if needed. */
1047 if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1048 TCGv v = tcg_temp_new();
1049
1050 /*
1051 * Microblaze gives MMU faults priority over faults due to
1052 * unaligned addresses. That's why we speculatively do the load
1053 * into v. If the load succeeds, we verify alignment of the
1054 * address and if that succeeds we write into the destination reg.
1055 */
1056 gen_load(dc, v, *addr, size, ex);
1057
1058 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1059 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
1060 tcg_const_tl(0), tcg_const_tl(size - 1));
1061 if (dc->rd) {
1062 if (rev) {
1063 dec_byteswap(dc, cpu_R[dc->rd], v, size);
1064 } else {
1065 tcg_gen_mov_tl(cpu_R[dc->rd], v);
1066 }
1067 }
1068 tcg_temp_free(v);
1069 } else {
1070 if (dc->rd) {
1071 gen_load(dc, cpu_R[dc->rd], *addr, size, ex);
1072 if (rev) {
1073 dec_byteswap(dc, cpu_R[dc->rd], cpu_R[dc->rd], size);
1074 }
1075 } else {
1076 /* We are loading into r0, no need to reverse. */
1077 gen_load(dc, env_imm, *addr, size, ex);
1078 }
1079 }
1080
1081 if (ex) { /* lwx */
1082         /* No support for AXI exclusive, so always clear C. */
1083 write_carryi(dc, 0);
1084 }
1085
1086 if (addr == &t)
1087 tcg_temp_free(t);
1088 }
1089
1090 static void gen_store(DisasContext *dc, TCGv addr, TCGv val,
1091 unsigned int size)
1092 {
1093 int mem_index = cpu_mmu_index(dc->env);
1094
1095 if (size == 1)
1096 tcg_gen_qemu_st8(val, addr, mem_index);
1097 else if (size == 2) {
1098 tcg_gen_qemu_st16(val, addr, mem_index);
1099 } else if (size == 4) {
1100 tcg_gen_qemu_st32(val, addr, mem_index);
1101 } else
1102 cpu_abort(dc->env, "Incorrect store size %d\n", size);
1103 }
1104
1105 static void dec_store(DisasContext *dc)
1106 {
1107 TCGv t, *addr, swx_addr;
1108 int swx_skip = 0;
1109 unsigned int size, rev = 0, ex = 0;
1110
1111 size = 1 << (dc->opcode & 3);
1112 if (!dc->type_b) {
1113 rev = (dc->ir >> 9) & 1;
1114 ex = (dc->ir >> 10) & 1;
1115 }
1116
1117 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
1118 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1119 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1120 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1121 return;
1122 }
1123
1124 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1125 ex ? "x" : "");
1126 t_sync_flags(dc);
1127 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1128 sync_jmpstate(dc);
1129 addr = compute_ldst_addr(dc, &t);
1130
1131 swx_addr = tcg_temp_local_new();
1132 if (ex) { /* swx */
1133 TCGv tval;
1134
1135 /* Force addr into the swx_addr. */
1136 tcg_gen_mov_tl(swx_addr, *addr);
1137 addr = &swx_addr;
1138 /* swx does not throw unaligned access errors, so force alignment */
1139 tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
1140
1141 write_carryi(dc, 1);
1142 swx_skip = gen_new_label();
1143 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
1144
1145 /* Compare the value loaded at lwx with current contents of
1146 the reserved location.
1147 FIXME: This only works for system emulation where we can expect
1148 this compare and the following write to be atomic. For user
1149 emulation we need to add atomicity between threads. */
1150 tval = tcg_temp_new();
1151 gen_load(dc, tval, swx_addr, 4, false);
1152 tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
1153 write_carryi(dc, 0);
1154 tcg_temp_free(tval);
1155 }
1156
1157 if (rev && size != 4) {
1158 /* Endian reverse the address. t is addr. */
1159 switch (size) {
1160 case 1:
1161 {
1162 /* 00 -> 11
1163 01 -> 10
1164                    10 -> 01
1165 11 -> 00 */
1166 TCGv low = tcg_temp_new();
1167
1168 /* Force addr into the temp. */
1169 if (addr != &t) {
1170 t = tcg_temp_new();
1171 tcg_gen_mov_tl(t, *addr);
1172 addr = &t;
1173 }
1174
1175 tcg_gen_andi_tl(low, t, 3);
1176 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1177 tcg_gen_andi_tl(t, t, ~3);
1178 tcg_gen_or_tl(t, t, low);
1179 tcg_gen_mov_tl(env_imm, t);
1180 tcg_temp_free(low);
1181 break;
1182 }
1183
1184 case 2:
1185 /* 00 -> 10
1186 10 -> 00. */
1187 /* Force addr into the temp. */
1188 if (addr != &t) {
1189 t = tcg_temp_new();
1190 tcg_gen_xori_tl(t, *addr, 2);
1191 addr = &t;
1192 } else {
1193 tcg_gen_xori_tl(t, t, 2);
1194 }
1195 break;
1196 default:
1197 cpu_abort(dc->env, "Invalid reverse size\n");
1198 break;
1199 }
1200
1201 if (size != 1) {
1202 TCGv bs_data = tcg_temp_new();
1203 dec_byteswap(dc, bs_data, cpu_R[dc->rd], size);
1204 gen_store(dc, *addr, bs_data, size);
1205 tcg_temp_free(bs_data);
1206 } else {
1207 gen_store(dc, *addr, cpu_R[dc->rd], size);
1208 }
1209 } else {
1210 if (rev) {
1211 TCGv bs_data = tcg_temp_new();
1212 dec_byteswap(dc, bs_data, cpu_R[dc->rd], size);
1213 gen_store(dc, *addr, bs_data, size);
1214 tcg_temp_free(bs_data);
1215 } else {
1216 gen_store(dc, *addr, cpu_R[dc->rd], size);
1217 }
1218 }
1219
1220 /* Verify alignment if needed. */
1221 if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1222 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1223 /* FIXME: if the alignment is wrong, we should restore the value
1224 * in memory. One possible way to achieve this is to probe
1225          *        the MMU prior to the memory access, that way we could put
1226 * the alignment checks in between the probe and the mem
1227 * access.
1228 */
1229 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
1230 tcg_const_tl(1), tcg_const_tl(size - 1));
1231 }
1232
1233 if (ex) {
1234 gen_set_label(swx_skip);
1235 }
1236 tcg_temp_free(swx_addr);
1237
1238 if (addr == &t)
1239 tcg_temp_free(t);
1240 }
1241
1242 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1243 TCGv d, TCGv a, TCGv b)
1244 {
1245 switch (cc) {
1246 case CC_EQ:
1247 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
1248 break;
1249 case CC_NE:
1250 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
1251 break;
1252 case CC_LT:
1253 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
1254 break;
1255 case CC_LE:
1256 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
1257 break;
1258 case CC_GE:
1259 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
1260 break;
1261 case CC_GT:
1262 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
1263 break;
1264 default:
1265 cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
1266 break;
1267 }
1268 }
1269
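/* Write the branch target into PC: pc_true if env_btaken is non-zero,
   pc_false otherwise. */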
1270 static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1271 {
1272 int l1;
1273
1274 l1 = gen_new_label();
1275 /* Conditional jmp. */
1276 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1277 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1278 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1279 gen_set_label(l1);
1280 }
1281
1282 static void dec_bcc(DisasContext *dc)
1283 {
1284 unsigned int cc;
1285 unsigned int dslot;
1286
1287 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1288 dslot = dc->ir & (1 << 25);
1289 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1290
1291 dc->delayed_branch = 1;
1292 if (dslot) {
1293 dc->delayed_branch = 2;
1294 dc->tb_flags |= D_FLAG;
1295 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1296 cpu_env, offsetof(CPUMBState, bimm));
1297 }
1298
1299 if (dec_alu_op_b_is_small_imm(dc)) {
1300 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1301
1302 tcg_gen_movi_tl(env_btarget, dc->pc + offset);
1303 dc->jmp = JMP_DIRECT_CC;
1304 dc->jmp_pc = dc->pc + offset;
1305 } else {
1306 dc->jmp = JMP_INDIRECT;
1307 tcg_gen_movi_tl(env_btarget, dc->pc);
1308 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1309 }
1310 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
1311 }
1312
1313 static void dec_br(DisasContext *dc)
1314 {
1315 unsigned int dslot, link, abs, mbar;
1316 int mem_index = cpu_mmu_index(dc->env);
1317
1318 dslot = dc->ir & (1 << 20);
1319 abs = dc->ir & (1 << 19);
1320 link = dc->ir & (1 << 18);
1321
1322 /* Memory barrier. */
1323 mbar = (dc->ir >> 16) & 31;
1324 if (mbar == 2 && dc->imm == 4) {
1325 /* mbar IMM & 16 decodes to sleep. */
1326 if (dc->rd & 16) {
1327 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1328 TCGv_i32 tmp_1 = tcg_const_i32(1);
1329
1330 LOG_DIS("sleep\n");
1331
1332 t_sync_flags(dc);
1333 tcg_gen_st_i32(tmp_1, cpu_env,
1334 -offsetof(MicroBlazeCPU, env)
1335 +offsetof(CPUState, halted));
1336 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
1337 gen_helper_raise_exception(cpu_env, tmp_hlt);
1338 tcg_temp_free_i32(tmp_hlt);
1339 tcg_temp_free_i32(tmp_1);
1340 return;
1341 }
1342 LOG_DIS("mbar %d\n", dc->rd);
1343 /* Break the TB. */
1344 dc->cpustate_changed = 1;
1345 return;
1346 }
1347
1348 LOG_DIS("br%s%s%s%s imm=%x\n",
1349 abs ? "a" : "", link ? "l" : "",
1350 dc->type_b ? "i" : "", dslot ? "d" : "",
1351 dc->imm);
1352
1353 dc->delayed_branch = 1;
1354 if (dslot) {
1355 dc->delayed_branch = 2;
1356 dc->tb_flags |= D_FLAG;
1357 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1358 cpu_env, offsetof(CPUMBState, bimm));
1359 }
1360 if (link && dc->rd)
1361 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
1362
1363 dc->jmp = JMP_INDIRECT;
1364 if (abs) {
1365 tcg_gen_movi_tl(env_btaken, 1);
1366 tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
1367 if (link && !dslot) {
1368 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1369 t_gen_raise_exception(dc, EXCP_BREAK);
1370 if (dc->imm == 0) {
1371 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
1372 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1373 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1374 return;
1375 }
1376
1377 t_gen_raise_exception(dc, EXCP_DEBUG);
1378 }
1379 }
1380 } else {
1381 if (dec_alu_op_b_is_small_imm(dc)) {
1382 dc->jmp = JMP_DIRECT;
1383 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1384 } else {
1385 tcg_gen_movi_tl(env_btaken, 1);
1386 tcg_gen_movi_tl(env_btarget, dc->pc);
1387 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1388 }
1389 }
1390 }
1391
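/* rti MSR update: set IE and restore UM/VM from the saved copies held one
   bit above them. */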
1392 static inline void do_rti(DisasContext *dc)
1393 {
1394 TCGv t0, t1;
1395 t0 = tcg_temp_new();
1396 t1 = tcg_temp_new();
1397 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1398 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1399 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1400
1401 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1402 tcg_gen_or_tl(t1, t1, t0);
1403 msr_write(dc, t1);
1404 tcg_temp_free(t1);
1405 tcg_temp_free(t0);
1406 dc->tb_flags &= ~DRTI_FLAG;
1407 }
1408
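/* rtb MSR update: clear BIP and restore UM/VM from their saved copies. */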
1409 static inline void do_rtb(DisasContext *dc)
1410 {
1411 TCGv t0, t1;
1412 t0 = tcg_temp_new();
1413 t1 = tcg_temp_new();
1414 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1415 tcg_gen_shri_tl(t0, t1, 1);
1416 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1417
1418 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1419 tcg_gen_or_tl(t1, t1, t0);
1420 msr_write(dc, t1);
1421 tcg_temp_free(t1);
1422 tcg_temp_free(t0);
1423 dc->tb_flags &= ~DRTB_FLAG;
1424 }
1425
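/* rte MSR update: set EE, clear EIP and restore UM/VM from their saved
   copies. */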
1426 static inline void do_rte(DisasContext *dc)
1427 {
1428 TCGv t0, t1;
1429 t0 = tcg_temp_new();
1430 t1 = tcg_temp_new();
1431
1432 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1433 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1434 tcg_gen_shri_tl(t0, t1, 1);
1435 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1436
1437 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1438 tcg_gen_or_tl(t1, t1, t0);
1439 msr_write(dc, t1);
1440 tcg_temp_free(t1);
1441 tcg_temp_free(t0);
1442 dc->tb_flags &= ~DRTE_FLAG;
1443 }
1444
1445 static void dec_rts(DisasContext *dc)
1446 {
1447 unsigned int b_bit, i_bit, e_bit;
1448 int mem_index = cpu_mmu_index(dc->env);
1449
1450 i_bit = dc->ir & (1 << 21);
1451 b_bit = dc->ir & (1 << 22);
1452 e_bit = dc->ir & (1 << 23);
1453
1454 dc->delayed_branch = 2;
1455 dc->tb_flags |= D_FLAG;
1456 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1457 cpu_env, offsetof(CPUMBState, bimm));
1458
1459 if (i_bit) {
1460 LOG_DIS("rtid ir=%x\n", dc->ir);
1461 if ((dc->tb_flags & MSR_EE_FLAG)
1462 && mem_index == MMU_USER_IDX) {
1463 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1464 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1465 }
1466 dc->tb_flags |= DRTI_FLAG;
1467 } else if (b_bit) {
1468 LOG_DIS("rtbd ir=%x\n", dc->ir);
1469 if ((dc->tb_flags & MSR_EE_FLAG)
1470 && mem_index == MMU_USER_IDX) {
1471 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1472 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1473 }
1474 dc->tb_flags |= DRTB_FLAG;
1475 } else if (e_bit) {
1476 LOG_DIS("rted ir=%x\n", dc->ir);
1477 if ((dc->tb_flags & MSR_EE_FLAG)
1478 && mem_index == MMU_USER_IDX) {
1479 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1480 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1481 }
1482 dc->tb_flags |= DRTE_FLAG;
1483 } else
1484 LOG_DIS("rts ir=%x\n", dc->ir);
1485
1486 dc->jmp = JMP_INDIRECT;
1487 tcg_gen_movi_tl(env_btaken, 1);
1488 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
1489 }
1490
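/* Return non-zero if the FPU v2 instructions are available; otherwise raise
   an FPU exception (when MSR[EE] is set) and return 0. */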
1491 static int dec_check_fpuv2(DisasContext *dc)
1492 {
1493 int r;
1494
1495 r = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK;
1496
1497 if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
1498 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1499 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1500 }
1501 return r;
1502 }
1503
1504 static void dec_fpu(DisasContext *dc)
1505 {
1506 unsigned int fpu_insn;
1507
1508 if ((dc->tb_flags & MSR_EE_FLAG)
1509 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1510 && !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) {
1511 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1512 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1513 return;
1514 }
1515
1516 fpu_insn = (dc->ir >> 7) & 7;
1517
1518 switch (fpu_insn) {
1519 case 0:
1520 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1521 cpu_R[dc->rb]);
1522 break;
1523
1524 case 1:
1525 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1526 cpu_R[dc->rb]);
1527 break;
1528
1529 case 2:
1530 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1531 cpu_R[dc->rb]);
1532 break;
1533
1534 case 3:
1535 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1536 cpu_R[dc->rb]);
1537 break;
1538
1539 case 4:
1540 switch ((dc->ir >> 4) & 7) {
1541 case 0:
1542 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1543 cpu_R[dc->ra], cpu_R[dc->rb]);
1544 break;
1545 case 1:
1546 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1547 cpu_R[dc->ra], cpu_R[dc->rb]);
1548 break;
1549 case 2:
1550 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1551 cpu_R[dc->ra], cpu_R[dc->rb]);
1552 break;
1553 case 3:
1554 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1555 cpu_R[dc->ra], cpu_R[dc->rb]);
1556 break;
1557 case 4:
1558 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1559 cpu_R[dc->ra], cpu_R[dc->rb]);
1560 break;
1561 case 5:
1562 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1563 cpu_R[dc->ra], cpu_R[dc->rb]);
1564 break;
1565 case 6:
1566 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1567 cpu_R[dc->ra], cpu_R[dc->rb]);
1568 break;
1569 default:
1570 qemu_log_mask(LOG_UNIMP,
1571 "unimplemented fcmp fpu_insn=%x pc=%x"
1572 " opc=%x\n",
1573 fpu_insn, dc->pc, dc->opcode);
1574 dc->abort_at_next_insn = 1;
1575 break;
1576 }
1577 break;
1578
1579 case 5:
1580 if (!dec_check_fpuv2(dc)) {
1581 return;
1582 }
1583 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1584 break;
1585
1586 case 6:
1587 if (!dec_check_fpuv2(dc)) {
1588 return;
1589 }
1590 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1591 break;
1592
1593 case 7:
1594 if (!dec_check_fpuv2(dc)) {
1595 return;
1596 }
1597 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1598 break;
1599
1600 default:
1601 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1602 " opc=%x\n",
1603 fpu_insn, dc->pc, dc->opcode);
1604 dc->abort_at_next_insn = 1;
1605 break;
1606 }
1607 }
1608
1609 static void dec_null(DisasContext *dc)
1610 {
1611 if ((dc->tb_flags & MSR_EE_FLAG)
1612 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1613 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1614 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1615 return;
1616 }
1617 qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1618 dc->abort_at_next_insn = 1;
1619 }
1620
1621 /* Insns connected to FSL or AXI stream attached devices. */
1622 static void dec_stream(DisasContext *dc)
1623 {
1624 int mem_index = cpu_mmu_index(dc->env);
1625 TCGv_i32 t_id, t_ctrl;
1626 int ctrl;
1627
1628 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1629 dc->type_b ? "" : "d", dc->imm);
1630
1631 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1632 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1633 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1634 return;
1635 }
1636
1637 t_id = tcg_temp_new();
1638 if (dc->type_b) {
1639 tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1640 ctrl = dc->imm >> 10;
1641 } else {
1642 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1643 ctrl = dc->imm >> 5;
1644 }
1645
1646 t_ctrl = tcg_const_tl(ctrl);
1647
1648 if (dc->rd == 0) {
1649 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1650 } else {
1651 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1652 }
1653 tcg_temp_free(t_id);
1654 tcg_temp_free(t_ctrl);
1655 }
1656
1657 static struct decoder_info {
1658 struct {
1659 uint32_t bits;
1660 uint32_t mask;
1661 };
1662 void (*dec)(DisasContext *dc);
1663 } decinfo[] = {
1664 {DEC_ADD, dec_add},
1665 {DEC_SUB, dec_sub},
1666 {DEC_AND, dec_and},
1667 {DEC_XOR, dec_xor},
1668 {DEC_OR, dec_or},
1669 {DEC_BIT, dec_bit},
1670 {DEC_BARREL, dec_barrel},
1671 {DEC_LD, dec_load},
1672 {DEC_ST, dec_store},
1673 {DEC_IMM, dec_imm},
1674 {DEC_BR, dec_br},
1675 {DEC_BCC, dec_bcc},
1676 {DEC_RTS, dec_rts},
1677 {DEC_FPU, dec_fpu},
1678 {DEC_MUL, dec_mul},
1679 {DEC_DIV, dec_div},
1680 {DEC_MSR, dec_msr},
1681 {DEC_STREAM, dec_stream},
1682 {{0, 0}, dec_null}
1683 };
1684
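/* Decode and translate a single instruction word, dispatching on the opcode
   to the matching handler in decinfo[]. */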
1685 static inline void decode(DisasContext *dc, uint32_t ir)
1686 {
1687 int i;
1688
1689 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
1690 tcg_gen_debug_insn_start(dc->pc);
1691 }
1692
1693 dc->ir = ir;
1694 LOG_DIS("%8.8x\t", dc->ir);
1695
1696 if (dc->ir)
1697 dc->nr_nops = 0;
1698 else {
1699 if ((dc->tb_flags & MSR_EE_FLAG)
1700 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1701 && (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1702 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1703 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1704 return;
1705 }
1706
1707 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1708 dc->nr_nops++;
1709 if (dc->nr_nops > 4)
1710 cpu_abort(dc->env, "fetching nop sequence\n");
1711 }
1712     /* Bit 2 (MSB-first numbering, i.e. bit 29 here) seems to indicate the insn type.  */
1713 dc->type_b = ir & (1 << 29);
1714
1715 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1716 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1717 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1718 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1719 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1720
1721     /* Dispatch to the first decoder whose opcode mask matches.  */
1722 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1723 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1724 decinfo[i].dec(dc);
1725 break;
1726 }
1727 }
1728 }
1729
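/* Raise a debug exception if a breakpoint has been set on the current PC. */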
1730 static void check_breakpoint(CPUMBState *env, DisasContext *dc)
1731 {
1732 CPUBreakpoint *bp;
1733
1734 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1735 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1736 if (bp->pc == dc->pc) {
1737 t_gen_raise_exception(dc, EXCP_DEBUG);
1738 dc->is_jmp = DISAS_UPDATE;
1739 }
1740 }
1741 }
1742 }
1743
1744 /* generate intermediate code for basic block 'tb'. */
1745 static inline void
1746 gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
1747 bool search_pc)
1748 {
1749 CPUState *cs = CPU(cpu);
1750 CPUMBState *env = &cpu->env;
1751 uint16_t *gen_opc_end;
1752 uint32_t pc_start;
1753 int j, lj;
1754 struct DisasContext ctx;
1755 struct DisasContext *dc = &ctx;
1756 uint32_t next_page_start, org_flags;
1757 target_ulong npc;
1758 int num_insns;
1759 int max_insns;
1760
1761 pc_start = tb->pc;
1762 dc->env = env;
1763 dc->tb = tb;
1764 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1765
1766 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
1767
1768 dc->is_jmp = DISAS_NEXT;
1769 dc->jmp = 0;
1770 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1771 if (dc->delayed_branch) {
1772 dc->jmp = JMP_INDIRECT;
1773 }
1774 dc->pc = pc_start;
1775 dc->singlestep_enabled = cs->singlestep_enabled;
1776 dc->cpustate_changed = 0;
1777 dc->abort_at_next_insn = 0;
1778 dc->nr_nops = 0;
1779
1780 if (pc_start & 3)
1781 cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);
1782
1783 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1784 #if !SIM_COMPAT
1785 qemu_log("--------------\n");
1786 log_cpu_state(CPU(cpu), 0);
1787 #endif
1788 }
1789
1790 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1791 lj = -1;
1792 num_insns = 0;
1793 max_insns = tb->cflags & CF_COUNT_MASK;
1794 if (max_insns == 0)
1795 max_insns = CF_COUNT_MASK;
1796
1797 gen_tb_start();
1798 do
1799 {
1800 #if SIM_COMPAT
1801 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1802 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1803 gen_helper_debug();
1804 }
1805 #endif
1806 check_breakpoint(env, dc);
1807
1808 if (search_pc) {
1809 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1810 if (lj < j) {
1811 lj++;
1812 while (lj < j)
1813 tcg_ctx.gen_opc_instr_start[lj++] = 0;
1814 }
1815 tcg_ctx.gen_opc_pc[lj] = dc->pc;
1816 tcg_ctx.gen_opc_instr_start[lj] = 1;
1817 tcg_ctx.gen_opc_icount[lj] = num_insns;
1818 }
1819
1820 /* Pretty disas. */
1821 LOG_DIS("%8.8x:\t", dc->pc);
1822
1823 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1824 gen_io_start();
1825
1826 dc->clear_imm = 1;
1827 decode(dc, cpu_ldl_code(env, dc->pc));
1828 if (dc->clear_imm)
1829 dc->tb_flags &= ~IMM_FLAG;
1830 dc->pc += 4;
1831 num_insns++;
1832
1833 if (dc->delayed_branch) {
1834 dc->delayed_branch--;
1835 if (!dc->delayed_branch) {
1836 if (dc->tb_flags & DRTI_FLAG)
1837 do_rti(dc);
1838 if (dc->tb_flags & DRTB_FLAG)
1839 do_rtb(dc);
1840 if (dc->tb_flags & DRTE_FLAG)
1841 do_rte(dc);
1842 /* Clear the delay slot flag. */
1843 dc->tb_flags &= ~D_FLAG;
1844 /* If it is a direct jump, try direct chaining. */
1845 if (dc->jmp == JMP_INDIRECT) {
1846 eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1847 dc->is_jmp = DISAS_JUMP;
1848 } else if (dc->jmp == JMP_DIRECT) {
1849 t_sync_flags(dc);
1850 gen_goto_tb(dc, 0, dc->jmp_pc);
1851 dc->is_jmp = DISAS_TB_JUMP;
1852 } else if (dc->jmp == JMP_DIRECT_CC) {
1853 int l1;
1854
1855 t_sync_flags(dc);
1856 l1 = gen_new_label();
1857 /* Conditional jmp. */
1858 tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1859 gen_goto_tb(dc, 1, dc->pc);
1860 gen_set_label(l1);
1861 gen_goto_tb(dc, 0, dc->jmp_pc);
1862
1863 dc->is_jmp = DISAS_TB_JUMP;
1864 }
1865 break;
1866 }
1867 }
1868 if (cs->singlestep_enabled) {
1869 break;
1870 }
1871 } while (!dc->is_jmp && !dc->cpustate_changed
1872 && tcg_ctx.gen_opc_ptr < gen_opc_end
1873 && !singlestep
1874 && (dc->pc < next_page_start)
1875 && num_insns < max_insns);
1876
1877 npc = dc->pc;
1878 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1879 if (dc->tb_flags & D_FLAG) {
1880 dc->is_jmp = DISAS_UPDATE;
1881 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1882 sync_jmpstate(dc);
1883 } else
1884 npc = dc->jmp_pc;
1885 }
1886
1887 if (tb->cflags & CF_LAST_IO)
1888 gen_io_end();
1889 /* Force an update if the per-tb cpu state has changed. */
1890 if (dc->is_jmp == DISAS_NEXT
1891 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1892 dc->is_jmp = DISAS_UPDATE;
1893 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1894 }
1895 t_sync_flags(dc);
1896
1897 if (unlikely(cs->singlestep_enabled)) {
1898 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1899
1900 if (dc->is_jmp != DISAS_JUMP) {
1901 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1902 }
1903 gen_helper_raise_exception(cpu_env, tmp);
1904 tcg_temp_free_i32(tmp);
1905 } else {
1906 switch(dc->is_jmp) {
1907 case DISAS_NEXT:
1908 gen_goto_tb(dc, 1, npc);
1909 break;
1910 default:
1911 case DISAS_JUMP:
1912 case DISAS_UPDATE:
1913 /* indicate that the hash table must be used
1914 to find the next TB */
1915 tcg_gen_exit_tb(0);
1916 break;
1917 case DISAS_TB_JUMP:
1918 /* nothing more to generate */
1919 break;
1920 }
1921 }
1922 gen_tb_end(tb, num_insns);
1923 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
1924 if (search_pc) {
1925 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1926 lj++;
1927 while (lj <= j)
1928 tcg_ctx.gen_opc_instr_start[lj++] = 0;
1929 } else {
1930 tb->size = dc->pc - pc_start;
1931 tb->icount = num_insns;
1932 }
1933
1934 #ifdef DEBUG_DISAS
1935 #if !SIM_COMPAT
1936 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1937 qemu_log("\n");
1938 #if DISAS_GNU
1939 log_target_disas(env, pc_start, dc->pc - pc_start, 0);
1940 #endif
1941 qemu_log("\nisize=%d osize=%td\n",
1942 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
1943 tcg_ctx.gen_opc_buf);
1944 }
1945 #endif
1946 #endif
1947 assert(!dc->abort_at_next_insn);
1948 }
1949
1950 void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
1951 {
1952 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
1953 }
1954
1955 void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
1956 {
1957 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
1958 }
1959
1960 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1961 int flags)
1962 {
1963 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1964 CPUMBState *env = &cpu->env;
1965 int i;
1966
1967 if (!env || !f)
1968 return;
1969
1970 cpu_fprintf(f, "IN: PC=%x %s\n",
1971 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1972 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1973 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1974 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1975 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1976 env->btaken, env->btarget,
1977 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1978 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1979 (env->sregs[SR_MSR] & MSR_EIP),
1980 (env->sregs[SR_MSR] & MSR_IE));
1981
1982 for (i = 0; i < 32; i++) {
1983 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1984 if ((i + 1) % 4 == 0)
1985 cpu_fprintf(f, "\n");
1986 }
1987 cpu_fprintf(f, "\n\n");
1988 }
1989
1990 MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
1991 {
1992 MicroBlazeCPU *cpu;
1993
1994 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
1995
1996 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
1997
1998 return cpu;
1999 }
2000
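/* Allocate the TCG globals that mirror the MicroBlaze CPU state. */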
2001 void mb_tcg_init(void)
2002 {
2003 int i;
2004
2005 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
2006
2007 env_debug = tcg_global_mem_new(TCG_AREG0,
2008 offsetof(CPUMBState, debug),
2009 "debug0");
2010 env_iflags = tcg_global_mem_new(TCG_AREG0,
2011 offsetof(CPUMBState, iflags),
2012 "iflags");
2013 env_imm = tcg_global_mem_new(TCG_AREG0,
2014 offsetof(CPUMBState, imm),
2015 "imm");
2016 env_btarget = tcg_global_mem_new(TCG_AREG0,
2017 offsetof(CPUMBState, btarget),
2018 "btarget");
2019 env_btaken = tcg_global_mem_new(TCG_AREG0,
2020 offsetof(CPUMBState, btaken),
2021 "btaken");
2022 env_res_addr = tcg_global_mem_new(TCG_AREG0,
2023 offsetof(CPUMBState, res_addr),
2024 "res_addr");
2025 env_res_val = tcg_global_mem_new(TCG_AREG0,
2026 offsetof(CPUMBState, res_val),
2027 "res_val");
2028 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
2029 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
2030 offsetof(CPUMBState, regs[i]),
2031 regnames[i]);
2032 }
2033 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
2034 cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
2035 offsetof(CPUMBState, sregs[i]),
2036 special_regnames[i]);
2037 }
2038 }
2039
2040 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
2041 {
2042 env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
2043 }