1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "tcg-op.h"
24 #include "exec/helper-proto.h"
25 #include "microblaze-decode.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-gen.h"
28
29 #define SIM_COMPAT 0
30 #define DISAS_GNU 1
31 #define DISAS_MB 1
32 #if DISAS_MB && !SIM_COMPAT
33 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
34 #else
35 # define LOG_DIS(...) do { } while (0)
36 #endif
37
38 #define D(x)
39
40 #define EXTRACT_FIELD(src, start, end) \
41 (((src) >> start) & ((1 << (end - start + 1)) - 1))
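/*
 * Illustrative example (annotation, not part of the original file):
 * decode() below uses this macro to pull register fields out of the
 * instruction word, e.g. for a hypothetical addik encoding:
 *
 *   uint32_t ir = 0x30400007;               // addik r2, r0, 7
 *   uint8_t rd = EXTRACT_FIELD(ir, 21, 25); // (ir >> 21) & 0x1f == 2
 */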
42
43 static TCGv env_debug;
44 static TCGv_ptr cpu_env;
45 static TCGv cpu_R[32];
46 static TCGv cpu_SR[18];
47 static TCGv env_imm;
48 static TCGv env_btaken;
49 static TCGv env_btarget;
50 static TCGv env_iflags;
51 static TCGv env_res_addr;
52 static TCGv env_res_val;
53
54 #include "exec/gen-icount.h"
55
56 /* This is the state at translation time. */
57 typedef struct DisasContext {
58 MicroBlazeCPU *cpu;
59 target_ulong pc;
60
61 /* Decoder. */
62 int type_b;
63 uint32_t ir;
64 uint8_t opcode;
65 uint8_t rd, ra, rb;
66 uint16_t imm;
67
68 unsigned int cpustate_changed;
69 unsigned int delayed_branch;
70 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
71 unsigned int clear_imm;
72 int is_jmp;
73
74 #define JMP_NOJMP 0
75 #define JMP_DIRECT 1
76 #define JMP_DIRECT_CC 2
77 #define JMP_INDIRECT 3
78 unsigned int jmp;
79 uint32_t jmp_pc;
80
81 int abort_at_next_insn;
82 int nr_nops;
83 struct TranslationBlock *tb;
84 int singlestep_enabled;
85 } DisasContext;
86
87 static const char *regnames[] =
88 {
89 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
90 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
91 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
92 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
93 };
94
95 static const char *special_regnames[] =
96 {
97 "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
98 "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
99 "sr16", "sr17", "sr18"
100 };
101
102 /* Sign extend at translation time. */
103 static inline int sign_extend(unsigned int val, unsigned int width)
104 {
105 int sval;
106
107 /* LSL. */
108 val <<= 31 - width;
109 sval = val;
110 /* ASR. */
111 sval >>= 31 - width;
112 return sval;
113 }
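/*
 * Worked example (annotation only): width is the bit index of the sign
 * bit, so for a 16-bit immediate sign_extend(0xffff, 15) does
 *
 *   val  <<= 31 - 15;   // -> 0xffff0000
 *   sval >>= 31 - 15;   // -> 0xffffffff == -1
 *
 * a logical shift left to bit 31 followed by an arithmetic shift right.
 */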
114
115 static inline void t_sync_flags(DisasContext *dc)
116 {
117 /* Synch the tb dependent flags between translator and runtime. */
118 if (dc->tb_flags != dc->synced_flags) {
119 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
120 dc->synced_flags = dc->tb_flags;
121 }
122 }
123
124 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
125 {
126 TCGv_i32 tmp = tcg_const_i32(index);
127
128 t_sync_flags(dc);
129 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
130 gen_helper_raise_exception(cpu_env, tmp);
131 tcg_temp_free_i32(tmp);
132 dc->is_jmp = DISAS_UPDATE;
133 }
134
135 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
136 {
137 TranslationBlock *tb;
138 tb = dc->tb;
139 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
140 tcg_gen_goto_tb(n);
141 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
142 tcg_gen_exit_tb((uintptr_t)tb + n);
143 } else {
144 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
145 tcg_gen_exit_tb(0);
146 }
147 }
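/*
 * Annotation (not original code): the two exits implement TB chaining.
 * If the destination lies on the same guest page, exiting with
 * (uintptr_t)tb + n lets the runtime later patch a direct jump from
 * exit slot n (0 or 1) of this TB to the destination TB; exiting with 0
 * falls back to the main loop, which looks the next TB up by PC.
 */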
148
149 static void read_carry(DisasContext *dc, TCGv d)
150 {
151 tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
152 }
153
154 /*
155 * write_carry sets the carry bits in MSR based on bit 0 of v.
156 * v[31:1] are ignored.
157 */
158 static void write_carry(DisasContext *dc, TCGv v)
159 {
160 TCGv t0 = tcg_temp_new();
161 tcg_gen_shli_tl(t0, v, 31);
162 tcg_gen_sari_tl(t0, t0, 31);
163 tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
164 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
165 ~(MSR_C | MSR_CC));
166 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
167 tcg_temp_free(t0);
168 }
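/*
 * Illustrative trace (annotation only) for v with bit 0 set:
 *
 *   t0 = v << 31;            // 0x80000000
 *   t0 = (int32_t)t0 >> 31;  // 0xffffffff, replicate v[0] everywhere
 *   t0 &= MSR_C | MSR_CC;    // keep just the two carry bits
 *
 * so both carry bits in MSR end up equal to v[0].
 */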
169
170 static void write_carryi(DisasContext *dc, bool carry)
171 {
172 TCGv t0 = tcg_temp_new();
173 tcg_gen_movi_tl(t0, carry);
174 write_carry(dc, t0);
175 tcg_temp_free(t0);
176 }
177
178 /* True if ALU operand b is a small immediate that may deserve
179 faster treatment. */
180 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
181 {
182 /* Immediate insn without the imm prefix? */
183 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
184 }
185
186 static inline TCGv *dec_alu_op_b(DisasContext *dc)
187 {
188 if (dc->type_b) {
189 if (dc->tb_flags & IMM_FLAG)
190 tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
191 else
192 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
193 return &env_imm;
194 } else
195 return &cpu_R[dc->rb];
196 }
197
198 static void dec_add(DisasContext *dc)
199 {
200 unsigned int k, c;
201 TCGv cf;
202
203 k = dc->opcode & 4;
204 c = dc->opcode & 2;
205
206 LOG_DIS("add%s%s%s r%d r%d r%d\n",
207 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
208 dc->rd, dc->ra, dc->rb);
209
210 /* Take care of the easy cases first. */
211 if (k) {
212 /* k - keep carry, no need to update MSR. */
213 /* If rd == r0, it's a nop. */
214 if (dc->rd) {
215 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
216
217 if (c) {
218 /* c - Add carry into the result. */
219 cf = tcg_temp_new();
220
221 read_carry(dc, cf);
222 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
223 tcg_temp_free(cf);
224 }
225 }
226 return;
227 }
228
229 /* From now on, we can assume k is zero. So we need to update MSR. */
230 /* Extract carry. */
231 cf = tcg_temp_new();
232 if (c) {
233 read_carry(dc, cf);
234 } else {
235 tcg_gen_movi_tl(cf, 0);
236 }
237
238 if (dc->rd) {
239 TCGv ncf = tcg_temp_new();
240 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
241 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
242 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
243 write_carry(dc, ncf);
244 tcg_temp_free(ncf);
245 } else {
246 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
247 write_carry(dc, cf);
248 }
249 tcg_temp_free(cf);
250 }
251
252 static void dec_sub(DisasContext *dc)
253 {
254 unsigned int u, cmp, k, c;
255 TCGv cf, na;
256
257 u = dc->imm & 2;
258 k = dc->opcode & 4;
259 c = dc->opcode & 2;
260 cmp = (dc->imm & 1) && (!dc->type_b) && k;
261
262 if (cmp) {
263 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
264 if (dc->rd) {
265 if (u)
266 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
267 else
268 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
269 }
270 return;
271 }
272
273 LOG_DIS("sub%s%s r%d, r%d r%d\n",
274 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
275
276 /* Take care of the easy cases first. */
277 if (k) {
278 /* k - keep carry, no need to update MSR. */
279 /* If rd == r0, it's a nop. */
280 if (dc->rd) {
281 tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
282
283 if (c) {
284 /* c - Add carry into the result. */
285 cf = tcg_temp_new();
286
287 read_carry(dc, cf);
288 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
289 tcg_temp_free(cf);
290 }
291 }
292 return;
293 }
294
295 /* From now on, we can assume k is zero. So we need to update MSR. */
296 /* Extract carry. And complement a into na. */
297 cf = tcg_temp_new();
298 na = tcg_temp_new();
299 if (c) {
300 read_carry(dc, cf);
301 } else {
302 tcg_gen_movi_tl(cf, 1);
303 }
304
305 /* d = b + ~a + c. carry defaults to 1. */
306 tcg_gen_not_tl(na, cpu_R[dc->ra]);
307
308 if (dc->rd) {
309 TCGv ncf = tcg_temp_new();
310 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
311 tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
312 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
313 write_carry(dc, ncf);
314 tcg_temp_free(ncf);
315 } else {
316 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
317 write_carry(dc, cf);
318 }
319 tcg_temp_free(cf);
320 tcg_temp_free(na);
321 }
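/*
 * Worked example (annotation only): rsub computes d = b - a as
 * d = b + ~a + 1, the usual two's complement identity.  With b = 5,
 * a = 3: ~a = 0xfffffffc and 5 + 0xfffffffc + 1 = 0x1_00000002, i.e.
 * d = 2 with carry out 1 (no borrow).  With the c bit set, the "+ 1"
 * becomes the current MSR carry, chaining multi-word subtracts.
 */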
322
323 static void dec_pattern(DisasContext *dc)
324 {
325 unsigned int mode;
326 int l1;
327
328 if ((dc->tb_flags & MSR_EE_FLAG)
329 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
330 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
331 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
332 t_gen_raise_exception(dc, EXCP_HW_EXCP);
333 }
334
335 mode = dc->opcode & 3;
336 switch (mode) {
337 case 0:
338 /* pcmpbf. */
339 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
340 if (dc->rd)
341 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
342 break;
343 case 2:
344 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
345 if (dc->rd) {
346 TCGv t0 = tcg_temp_local_new();
347 l1 = gen_new_label();
348 tcg_gen_movi_tl(t0, 1);
349 tcg_gen_brcond_tl(TCG_COND_EQ,
350 cpu_R[dc->ra], cpu_R[dc->rb], l1);
351 tcg_gen_movi_tl(t0, 0);
352 gen_set_label(l1);
353 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
354 tcg_temp_free(t0);
355 }
356 break;
357 case 3:
358 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
359 l1 = gen_new_label();
360 if (dc->rd) {
361 TCGv t0 = tcg_temp_local_new();
362 tcg_gen_movi_tl(t0, 1);
363 tcg_gen_brcond_tl(TCG_COND_NE,
364 cpu_R[dc->ra], cpu_R[dc->rb], l1);
365 tcg_gen_movi_tl(t0, 0);
366 gen_set_label(l1);
367 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
368 tcg_temp_free(t0);
369 }
370 break;
371 default:
372 cpu_abort(CPU(dc->cpu),
373 "unsupported pattern insn opcode=%x\n", dc->opcode);
374 break;
375 }
376 }
377
378 static void dec_and(DisasContext *dc)
379 {
380 unsigned int not;
381
382 if (!dc->type_b && (dc->imm & (1 << 10))) {
383 dec_pattern(dc);
384 return;
385 }
386
387 not = dc->opcode & (1 << 1);
388 LOG_DIS("and%s\n", not ? "n" : "");
389
390 if (!dc->rd)
391 return;
392
393 if (not) {
394 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
395 } else
396 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
397 }
398
399 static void dec_or(DisasContext *dc)
400 {
401 if (!dc->type_b && (dc->imm & (1 << 10))) {
402 dec_pattern(dc);
403 return;
404 }
405
406 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
407 if (dc->rd)
408 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
409 }
410
411 static void dec_xor(DisasContext *dc)
412 {
413 if (!dc->type_b && (dc->imm & (1 << 10))) {
414 dec_pattern(dc);
415 return;
416 }
417
418 LOG_DIS("xor r%d\n", dc->rd);
419 if (dc->rd)
420 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
421 }
422
423 static inline void msr_read(DisasContext *dc, TCGv d)
424 {
425 tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
426 }
427
428 static inline void msr_write(DisasContext *dc, TCGv v)
429 {
430 TCGv t;
431
432 t = tcg_temp_new();
433 dc->cpustate_changed = 1;
434 /* PVR bit is not writable. */
435 tcg_gen_andi_tl(t, v, ~MSR_PVR);
436 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
437 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
438 tcg_temp_free(t);
439 }
440
441 static void dec_msr(DisasContext *dc)
442 {
443 CPUState *cs = CPU(dc->cpu);
444 TCGv t0, t1;
445 unsigned int sr, to, rn;
446 int mem_index = cpu_mmu_index(&dc->cpu->env);
447
448 sr = dc->imm & ((1 << 14) - 1);
449 to = dc->imm & (1 << 14);
450 dc->type_b = 1;
451 if (to)
452 dc->cpustate_changed = 1;
453
454 /* msrclr and msrset. */
455 if (!(dc->imm & (1 << 15))) {
456 unsigned int clr = dc->ir & (1 << 16);
457
458 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
459 dc->rd, dc->imm);
460
461 if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
462 /* nop??? */
463 return;
464 }
465
466 if ((dc->tb_flags & MSR_EE_FLAG)
467 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
468 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
469 t_gen_raise_exception(dc, EXCP_HW_EXCP);
470 return;
471 }
472
473 if (dc->rd)
474 msr_read(dc, cpu_R[dc->rd]);
475
476 t0 = tcg_temp_new();
477 t1 = tcg_temp_new();
478 msr_read(dc, t0);
479 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
480
481 if (clr) {
482 tcg_gen_not_tl(t1, t1);
483 tcg_gen_and_tl(t0, t0, t1);
484 } else
485 tcg_gen_or_tl(t0, t0, t1);
486 msr_write(dc, t0);
487 tcg_temp_free(t0);
488 tcg_temp_free(t1);
489 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
490 dc->is_jmp = DISAS_UPDATE;
491 return;
492 }
493
494 if (to) {
495 if ((dc->tb_flags & MSR_EE_FLAG)
496 && mem_index == MMU_USER_IDX) {
497 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
498 t_gen_raise_exception(dc, EXCP_HW_EXCP);
499 return;
500 }
501 }
502
503 #if !defined(CONFIG_USER_ONLY)
504 /* Catch read/writes to the mmu block. */
505 if ((sr & ~0xff) == 0x1000) {
506 sr &= 7;
507 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
508 if (to)
509 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
510 else
511 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
512 return;
513 }
514 #endif
515
516 if (to) {
517 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
518 switch (sr) {
519 case 0:
520 break;
521 case 1:
522 msr_write(dc, cpu_R[dc->ra]);
523 break;
524 case 0x3:
525 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
526 break;
527 case 0x5:
528 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
529 break;
530 case 0x7:
531 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
532 break;
533 case 0x800:
534 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
535 break;
536 case 0x802:
537 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
538 break;
539 default:
540 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
541 break;
542 }
543 } else {
544 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
545
546 switch (sr) {
547 case 0:
548 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
549 break;
550 case 1:
551 msr_read(dc, cpu_R[dc->rd]);
552 break;
553 case 0x3:
554 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
555 break;
556 case 0x5:
557 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
558 break;
559 case 0x7:
560 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
561 break;
562 case 0xb:
563 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
564 break;
565 case 0x800:
566 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
567 break;
568 case 0x802:
569 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
570 break;
571 case 0x2000:
572 case 0x2001:
573 case 0x2002:
574 case 0x2003:
575 case 0x2004:
576 case 0x2005:
577 case 0x2006:
578 case 0x2007:
579 case 0x2008:
580 case 0x2009:
581 case 0x200a:
582 case 0x200b:
583 case 0x200c:
584 rn = sr & 0xf;
585 tcg_gen_ld_tl(cpu_R[dc->rd],
586 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
587 break;
588 default:
589 cpu_abort(cs, "unknown mfs reg %x\n", sr);
590 break;
591 }
592 }
593
594 if (dc->rd == 0) {
595 tcg_gen_movi_tl(cpu_R[0], 0);
596 }
597 }
598
599 /* 64-bit signed mul, lower result in d and upper in d2. */
600 static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
601 {
602 TCGv_i64 t0, t1;
603
604 t0 = tcg_temp_new_i64();
605 t1 = tcg_temp_new_i64();
606
607 tcg_gen_ext_i32_i64(t0, a);
608 tcg_gen_ext_i32_i64(t1, b);
609 tcg_gen_mul_i64(t0, t0, t1);
610
611 tcg_gen_trunc_i64_i32(d, t0);
612 tcg_gen_shri_i64(t0, t0, 32);
613 tcg_gen_trunc_i64_i32(d2, t0);
614
615 tcg_temp_free_i64(t0);
616 tcg_temp_free_i64(t1);
617 }
618
619 /* 64-bit unsigned mul, lower result in d and upper in d2. */
620 static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
621 {
622 TCGv_i64 t0, t1;
623
624 t0 = tcg_temp_new_i64();
625 t1 = tcg_temp_new_i64();
626
627 tcg_gen_extu_i32_i64(t0, a);
628 tcg_gen_extu_i32_i64(t1, b);
629 tcg_gen_mul_i64(t0, t0, t1);
630
631 tcg_gen_trunc_i64_i32(d, t0);
632 tcg_gen_shri_i64(t0, t0, 32);
633 tcg_gen_trunc_i64_i32(d2, t0);
634
635 tcg_temp_free_i64(t0);
636 tcg_temp_free_i64(t1);
637 }
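/*
 * Illustrative example (annotation only): both helpers widen to 64 bits,
 * multiply once and split the product.  Unsigned case with a =
 * 0x80000000, b = 2:
 *
 *   t0 = (uint64_t)a * b;   // 0x100000000
 *   d  = (uint32_t)t0;      // 0x00000000, low half
 *   d2 = t0 >> 32;          // 0x00000001, high half (what mulhu wants)
 */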
638
639 /* Multiplier unit. */
640 static void dec_mul(DisasContext *dc)
641 {
642 TCGv d[2];
643 unsigned int subcode;
644
645 if ((dc->tb_flags & MSR_EE_FLAG)
646 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
647 && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
648 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
649 t_gen_raise_exception(dc, EXCP_HW_EXCP);
650 return;
651 }
652
653 subcode = dc->imm & 3;
654 d[0] = tcg_temp_new();
655 d[1] = tcg_temp_new();
656
657 if (dc->type_b) {
658 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
659 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
660 goto done;
661 }
662
663 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
664 if (subcode >= 1 && subcode <= 3
665 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
666 /* nop??? */
667 }
668
669 switch (subcode) {
670 case 0:
671 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
672 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
673 break;
674 case 1:
675 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
676 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
677 break;
678 case 2:
679 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
680 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
681 break;
682 case 3:
683 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
684 t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
685 break;
686 default:
687 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
688 break;
689 }
690 done:
691 tcg_temp_free(d[0]);
692 tcg_temp_free(d[1]);
693 }
694
695 /* Div unit. */
696 static void dec_div(DisasContext *dc)
697 {
698 unsigned int u;
699
700 u = dc->imm & 2;
701 LOG_DIS("div\n");
702
703 if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
704 && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
705 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
706 t_gen_raise_exception(dc, EXCP_HW_EXCP);
707 }
708
709 if (u)
710 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
711 cpu_R[dc->ra]);
712 else
713 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
714 cpu_R[dc->ra]);
715 if (!dc->rd)
716 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
717 }
718
719 static void dec_barrel(DisasContext *dc)
720 {
721 TCGv t0;
722 unsigned int s, t;
723
724 if ((dc->tb_flags & MSR_EE_FLAG)
725 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
726 && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
727 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
728 t_gen_raise_exception(dc, EXCP_HW_EXCP);
729 return;
730 }
731
732 s = dc->imm & (1 << 10);
733 t = dc->imm & (1 << 9);
734
735 LOG_DIS("bs%s%s r%d r%d r%d\n",
736 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
737
738 t0 = tcg_temp_new();
739
740 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
741 tcg_gen_andi_tl(t0, t0, 31);
742
743 if (s)
744 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
745 else {
746 if (t)
747 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
748 else
749 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
750 }
751 }
752
753 static void dec_bit(DisasContext *dc)
754 {
755 CPUState *cs = CPU(dc->cpu);
756 TCGv t0;
757 unsigned int op;
758 int mem_index = cpu_mmu_index(&dc->cpu->env);
759
760 op = dc->ir & ((1 << 9) - 1);
761 switch (op) {
762 case 0x21:
763 /* src. */
764 t0 = tcg_temp_new();
765
766 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
767 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
768 write_carry(dc, cpu_R[dc->ra]);
769 if (dc->rd) {
770 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
771 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
772 }
773 tcg_temp_free(t0);
774 break;
775
776 case 0x1:
777 case 0x41:
778 /* srl (op 0x41) / sra (op 0x1). */
779 LOG_DIS("%s r%d r%d\n", op == 0x41 ? "srl" : "sra", dc->rd, dc->ra);
780
781 /* Update carry. Note that write carry only looks at the LSB. */
782 write_carry(dc, cpu_R[dc->ra]);
783 if (dc->rd) {
784 if (op == 0x41)
785 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
786 else
787 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
788 }
789 break;
790 case 0x60:
791 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
792 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
793 break;
794 case 0x61:
795 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
796 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
797 break;
798 case 0x64:
799 case 0x66:
800 case 0x74:
801 case 0x76:
802 /* wdc. */
803 LOG_DIS("wdc r%d\n", dc->ra);
804 if ((dc->tb_flags & MSR_EE_FLAG)
805 && mem_index == MMU_USER_IDX) {
806 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
807 t_gen_raise_exception(dc, EXCP_HW_EXCP);
808 return;
809 }
810 break;
811 case 0x68:
812 /* wic. */
813 LOG_DIS("wic r%d\n", dc->ra);
814 if ((dc->tb_flags & MSR_EE_FLAG)
815 && mem_index == MMU_USER_IDX) {
816 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
817 t_gen_raise_exception(dc, EXCP_HW_EXCP);
818 return;
819 }
820 break;
821 case 0xe0:
822 if ((dc->tb_flags & MSR_EE_FLAG)
823 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
824 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
825 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
826 t_gen_raise_exception(dc, EXCP_HW_EXCP);
827 }
828 if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
829 gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
830 }
831 break;
832 case 0x1e0:
833 /* swapb */
834 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
835 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
836 break;
837 case 0x1e2:
838 /* swaph */
839 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
840 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
841 break;
842 default:
843 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
844 dc->pc, op, dc->rd, dc->ra, dc->rb);
845 break;
846 }
847 }
848
849 static inline void sync_jmpstate(DisasContext *dc)
850 {
851 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
852 if (dc->jmp == JMP_DIRECT) {
853 tcg_gen_movi_tl(env_btaken, 1);
854 }
855 dc->jmp = JMP_INDIRECT;
856 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
857 }
858 }
859
860 static void dec_imm(DisasContext *dc)
861 {
862 LOG_DIS("imm %x\n", dc->imm << 16);
863 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
864 dc->tb_flags |= IMM_FLAG;
865 dc->clear_imm = 0;
866 }
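/*
 * Worked example (annotation only): imm supplies the upper 16 bits of a
 * 32-bit immediate for the next type-b insn:
 *
 *   imm   0x1234            ; env_imm = 0x12340000, IMM_FLAG set
 *   addik r2, r0, 0x5678    ; dec_alu_op_b() ORs in the low half,
 *                           ; so operand b = 0x12345678
 *
 * Without a preceding imm, the 16-bit field is sign-extended instead.
 */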
867
868 static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
869 {
870 unsigned int extimm = dc->tb_flags & IMM_FLAG;
871 /* Should be set to one if r1 is used by load/stores. */
872 int stackprot = 0;
873
874 /* All load/stores use ra. */
875 if (dc->ra == 1) {
876 stackprot = 1;
877 }
878
879 /* Treat the common cases first. */
880 if (!dc->type_b) {
881 /* If any of the regs is r0, return a ptr to the other. */
882 if (dc->ra == 0) {
883 return &cpu_R[dc->rb];
884 } else if (dc->rb == 0) {
885 return &cpu_R[dc->ra];
886 }
887
888 if (dc->rb == 1) {
889 stackprot = 1;
890 }
891
892 *t = tcg_temp_new();
893 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
894
895 if (stackprot) {
896 gen_helper_stackprot(cpu_env, *t);
897 }
898 return t;
899 }
900 /* Immediate. */
901 if (!extimm) {
902 if (dc->imm == 0) {
903 return &cpu_R[dc->ra];
904 }
905 *t = tcg_temp_new();
906 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
907 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
908 } else {
909 *t = tcg_temp_new();
910 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
911 }
912
913 if (stackprot) {
914 gen_helper_stackprot(cpu_env, *t);
915 }
916 return t;
917 }
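/*
 * Example (annotation only): for "lw r3, r4, r0" the rb == 0 shortcut
 * returns &cpu_R[4] and no temporary is needed; for "lwi r3, r1, 16" a
 * temp holding r1 + 16 is built and, since ra is the stack pointer,
 * gen_helper_stackprot() validates the address.
 */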
918
919 static void dec_load(DisasContext *dc)
920 {
921 TCGv t, v, *addr;
922 unsigned int size, rev = 0, ex = 0;
923 TCGMemOp mop;
924
925 mop = dc->opcode & 3;
926 size = 1 << mop;
927 if (!dc->type_b) {
928 rev = (dc->ir >> 9) & 1;
929 ex = (dc->ir >> 10) & 1;
930 }
931 mop |= MO_TE;
932 if (rev) {
933 mop ^= MO_BSWAP;
934 }
935
936 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
937 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
938 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
939 t_gen_raise_exception(dc, EXCP_HW_EXCP);
940 return;
941 }
942
943 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
944 ex ? "x" : "");
945
946 t_sync_flags(dc);
947 addr = compute_ldst_addr(dc, &t);
948
949 /*
950 * When doing reverse accesses we need to do two things.
951 *
952 * 1. Reverse the address wrt endianness.
953 * 2. Byteswap the data lanes on the way back into the CPU core.
954 */
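/*
 * Concrete case (annotation only): a reversed byte access to address
 * 0x1001 is redirected to 0x1002 - within a 32-bit word the byte offset
 * is mirrored as offset' = 3 - offset, which is the table shown in the
 * size == 1 case below.
 */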
955 if (rev && size != 4) {
956 /* Endian reverse the address. t is addr. */
957 switch (size) {
958 case 1:
959 {
960 /* 00 -> 11
961 01 -> 10
962 10 -> 01
963 11 -> 00 */
964 TCGv low = tcg_temp_new();
965
966 /* Force addr into the temp. */
967 if (addr != &t) {
968 t = tcg_temp_new();
969 tcg_gen_mov_tl(t, *addr);
970 addr = &t;
971 }
972
973 tcg_gen_andi_tl(low, t, 3);
974 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
975 tcg_gen_andi_tl(t, t, ~3);
976 tcg_gen_or_tl(t, t, low);
977 tcg_gen_mov_tl(env_imm, t);
978 tcg_temp_free(low);
979 break;
980 }
981
982 case 2:
983 /* 00 -> 10
984 10 -> 00. */
985 /* Force addr into the temp. */
986 if (addr != &t) {
987 t = tcg_temp_new();
988 tcg_gen_xori_tl(t, *addr, 2);
989 addr = &t;
990 } else {
991 tcg_gen_xori_tl(t, t, 2);
992 }
993 break;
994 default:
995 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
996 break;
997 }
998 }
999
1000 /* lwx does not throw unaligned access errors, so force alignment */
1001 if (ex) {
1002 /* Force addr into the temp. */
1003 if (addr != &t) {
1004 t = tcg_temp_new();
1005 tcg_gen_mov_tl(t, *addr);
1006 addr = &t;
1007 }
1008 tcg_gen_andi_tl(t, t, ~3);
1009 }
1010
1011 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1012 sync_jmpstate(dc);
1013
1014 /* Verify alignment if needed. */
1015 /*
1016 * Microblaze gives MMU faults priority over faults due to
1017 * unaligned addresses. That's why we speculatively do the load
1018 * into v. If the load succeeds, we verify alignment of the
1019 * address and if that succeeds we write into the destination reg.
1020 */
1021 v = tcg_temp_new();
1022 tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
1023
1024 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1025 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1026 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
1027 tcg_const_tl(0), tcg_const_tl(size - 1));
1028 }
1029
1030 if (ex) {
1031 tcg_gen_mov_tl(env_res_addr, *addr);
1032 tcg_gen_mov_tl(env_res_val, v);
1033 }
1034 if (dc->rd) {
1035 tcg_gen_mov_tl(cpu_R[dc->rd], v);
1036 }
1037 tcg_temp_free(v);
1038
1039 if (ex) { /* lwx */
1040 /* No support for AXI exclusive, so always clear C. */
1041 write_carryi(dc, 0);
1042 }
1043
1044 if (addr == &t)
1045 tcg_temp_free(t);
1046 }
1047
1048 static void dec_store(DisasContext *dc)
1049 {
1050 TCGv t, *addr, swx_addr;
1051 int swx_skip = 0;
1052 unsigned int size, rev = 0, ex = 0;
1053 TCGMemOp mop;
1054
1055 mop = dc->opcode & 3;
1056 size = 1 << mop;
1057 if (!dc->type_b) {
1058 rev = (dc->ir >> 9) & 1;
1059 ex = (dc->ir >> 10) & 1;
1060 }
1061 mop |= MO_TE;
1062 if (rev) {
1063 mop ^= MO_BSWAP;
1064 }
1065
1066 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
1067 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1068 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1069 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1070 return;
1071 }
1072
1073 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1074 ex ? "x" : "");
1075 t_sync_flags(dc);
1076 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1077 sync_jmpstate(dc);
1078 addr = compute_ldst_addr(dc, &t);
1079
1080 swx_addr = tcg_temp_local_new();
1081 if (ex) { /* swx */
1082 TCGv tval;
1083
1084 /* Force addr into the swx_addr. */
1085 tcg_gen_mov_tl(swx_addr, *addr);
1086 addr = &swx_addr;
1087 /* swx does not throw unaligned access errors, so force alignment */
1088 tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
1089
1090 write_carryi(dc, 1);
1091 swx_skip = gen_new_label();
1092 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
1093
1094 /* Compare the value loaded at lwx with current contents of
1095 the reserved location.
1096 FIXME: This only works for system emulation where we can expect
1097 this compare and the following write to be atomic. For user
1098 emulation we need to add atomicity between threads. */
1099 tval = tcg_temp_new();
1100 tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
1101 MO_TEUL);
1102 tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
1103 write_carryi(dc, 0);
1104 tcg_temp_free(tval);
1105 }
1106
1107 if (rev && size != 4) {
1108 /* Endian reverse the address. t is addr. */
1109 switch (size) {
1110 case 1:
1111 {
1112 /* 00 -> 11
1113 01 -> 10
1114 10 -> 01
1115 11 -> 00 */
1116 TCGv low = tcg_temp_new();
1117
1118 /* Force addr into the temp. */
1119 if (addr != &t) {
1120 t = tcg_temp_new();
1121 tcg_gen_mov_tl(t, *addr);
1122 addr = &t;
1123 }
1124
1125 tcg_gen_andi_tl(low, t, 3);
1126 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1127 tcg_gen_andi_tl(t, t, ~3);
1128 tcg_gen_or_tl(t, t, low);
1129 tcg_gen_mov_tl(env_imm, t);
1130 tcg_temp_free(low);
1131 break;
1132 }
1133
1134 case 2:
1135 /* 00 -> 10
1136 10 -> 00. */
1137 /* Force addr into the temp. */
1138 if (addr != &t) {
1139 t = tcg_temp_new();
1140 tcg_gen_xori_tl(t, *addr, 2);
1141 addr = &t;
1142 } else {
1143 tcg_gen_xori_tl(t, t, 2);
1144 }
1145 break;
1146 default:
1147 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1148 break;
1149 }
1150 }
1151 tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);
1152
1153 /* Verify alignment if needed. */
1154 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1155 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1156 /* FIXME: if the alignment is wrong, we should restore the value
1157 * in memory. One possible way to achieve this is to probe
1158 * the MMU prior to the memaccess; that way we could put
1159 * the alignment checks in between the probe and the mem
1160 * access.
1161 */
1162 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
1163 tcg_const_tl(1), tcg_const_tl(size - 1));
1164 }
1165
1166 if (ex) {
1167 gen_set_label(swx_skip);
1168 }
1169 tcg_temp_free(swx_addr);
1170
1171 if (addr == &t)
1172 tcg_temp_free(t);
1173 }
1174
1175 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1176 TCGv d, TCGv a, TCGv b)
1177 {
1178 switch (cc) {
1179 case CC_EQ:
1180 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
1181 break;
1182 case CC_NE:
1183 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
1184 break;
1185 case CC_LT:
1186 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
1187 break;
1188 case CC_LE:
1189 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
1190 break;
1191 case CC_GE:
1192 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
1193 break;
1194 case CC_GT:
1195 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
1196 break;
1197 default:
1198 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1199 break;
1200 }
1201 }
1202
1203 static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1204 {
1205 int l1;
1206
1207 l1 = gen_new_label();
1208 /* Conditional jmp. */
1209 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1210 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1211 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1212 gen_set_label(l1);
1213 }
1214
1215 static void dec_bcc(DisasContext *dc)
1216 {
1217 unsigned int cc;
1218 unsigned int dslot;
1219
1220 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1221 dslot = dc->ir & (1 << 25);
1222 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1223
1224 dc->delayed_branch = 1;
1225 if (dslot) {
1226 dc->delayed_branch = 2;
1227 dc->tb_flags |= D_FLAG;
1228 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1229 cpu_env, offsetof(CPUMBState, bimm));
1230 }
1231
1232 if (dec_alu_op_b_is_small_imm(dc)) {
1233 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1234
1235 tcg_gen_movi_tl(env_btarget, dc->pc + offset);
1236 dc->jmp = JMP_DIRECT_CC;
1237 dc->jmp_pc = dc->pc + offset;
1238 } else {
1239 dc->jmp = JMP_INDIRECT;
1240 tcg_gen_movi_tl(env_btarget, dc->pc);
1241 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1242 }
1243 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
1244 }
1245
1246 static void dec_br(DisasContext *dc)
1247 {
1248 unsigned int dslot, link, abs, mbar;
1249 int mem_index = cpu_mmu_index(&dc->cpu->env);
1250
1251 dslot = dc->ir & (1 << 20);
1252 abs = dc->ir & (1 << 19);
1253 link = dc->ir & (1 << 18);
1254
1255 /* Memory barrier. */
1256 mbar = (dc->ir >> 16) & 31;
1257 if (mbar == 2 && dc->imm == 4) {
1258 /* mbar with bit 4 of the immediate (held in the rd field) set decodes to sleep. */
1259 if (dc->rd & 16) {
1260 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1261 TCGv_i32 tmp_1 = tcg_const_i32(1);
1262
1263 LOG_DIS("sleep\n");
1264
1265 t_sync_flags(dc);
1266 tcg_gen_st_i32(tmp_1, cpu_env,
1267 -offsetof(MicroBlazeCPU, env)
1268 +offsetof(CPUState, halted));
1269 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
1270 gen_helper_raise_exception(cpu_env, tmp_hlt);
1271 tcg_temp_free_i32(tmp_hlt);
1272 tcg_temp_free_i32(tmp_1);
1273 return;
1274 }
1275 LOG_DIS("mbar %d\n", dc->rd);
1276 /* Break the TB. */
1277 dc->cpustate_changed = 1;
1278 return;
1279 }
1280
1281 LOG_DIS("br%s%s%s%s imm=%x\n",
1282 abs ? "a" : "", link ? "l" : "",
1283 dc->type_b ? "i" : "", dslot ? "d" : "",
1284 dc->imm);
1285
1286 dc->delayed_branch = 1;
1287 if (dslot) {
1288 dc->delayed_branch = 2;
1289 dc->tb_flags |= D_FLAG;
1290 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1291 cpu_env, offsetof(CPUMBState, bimm));
1292 }
1293 if (link && dc->rd)
1294 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
1295
1296 dc->jmp = JMP_INDIRECT;
1297 if (abs) {
1298 tcg_gen_movi_tl(env_btaken, 1);
1299 tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
1300 if (link && !dslot) {
1301 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1302 t_gen_raise_exception(dc, EXCP_BREAK);
1303 if (dc->imm == 0) {
1304 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
1305 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1306 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1307 return;
1308 }
1309
1310 t_gen_raise_exception(dc, EXCP_DEBUG);
1311 }
1312 }
1313 } else {
1314 if (dec_alu_op_b_is_small_imm(dc)) {
1315 dc->jmp = JMP_DIRECT;
1316 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1317 } else {
1318 tcg_gen_movi_tl(env_btaken, 1);
1319 tcg_gen_movi_tl(env_btarget, dc->pc);
1320 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1321 }
1322 }
1323 }
1324
1325 static inline void do_rti(DisasContext *dc)
1326 {
1327 TCGv t0, t1;
1328 t0 = tcg_temp_new();
1329 t1 = tcg_temp_new();
1330 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1331 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1332 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1333
1334 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1335 tcg_gen_or_tl(t1, t1, t0);
1336 msr_write(dc, t1);
1337 tcg_temp_free(t1);
1338 tcg_temp_free(t0);
1339 dc->tb_flags &= ~DRTI_FLAG;
1340 }
1341
1342 static inline void do_rtb(DisasContext *dc)
1343 {
1344 TCGv t0, t1;
1345 t0 = tcg_temp_new();
1346 t1 = tcg_temp_new();
1347 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1348 tcg_gen_shri_tl(t0, t1, 1);
1349 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1350
1351 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1352 tcg_gen_or_tl(t1, t1, t0);
1353 msr_write(dc, t1);
1354 tcg_temp_free(t1);
1355 tcg_temp_free(t0);
1356 dc->tb_flags &= ~DRTB_FLAG;
1357 }
1358
1359 static inline void do_rte(DisasContext *dc)
1360 {
1361 TCGv t0, t1;
1362 t0 = tcg_temp_new();
1363 t1 = tcg_temp_new();
1364
1365 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1366 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1367 tcg_gen_shri_tl(t0, t1, 1);
1368 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1369
1370 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1371 tcg_gen_or_tl(t1, t1, t0);
1372 msr_write(dc, t1);
1373 tcg_temp_free(t1);
1374 tcg_temp_free(t0);
1375 dc->tb_flags &= ~DRTE_FLAG;
1376 }
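/*
 * Annotation (not original code): all three return paths shift MSR right
 * by one so the saved copies land in the live bits, UMS -> UM and
 * VMS -> VM.  In addition, rti sets MSR_IE (re-enable interrupts), rtb
 * clears MSR_BIP (break in progress) and rte sets MSR_EE while clearing
 * MSR_EIP.  E.g. if UMS was 1 when the handler was entered, returning
 * drops the CPU back into user mode.
 */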
1377
1378 static void dec_rts(DisasContext *dc)
1379 {
1380 unsigned int b_bit, i_bit, e_bit;
1381 int mem_index = cpu_mmu_index(&dc->cpu->env);
1382
1383 i_bit = dc->ir & (1 << 21);
1384 b_bit = dc->ir & (1 << 22);
1385 e_bit = dc->ir & (1 << 23);
1386
1387 dc->delayed_branch = 2;
1388 dc->tb_flags |= D_FLAG;
1389 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1390 cpu_env, offsetof(CPUMBState, bimm));
1391
1392 if (i_bit) {
1393 LOG_DIS("rtid ir=%x\n", dc->ir);
1394 if ((dc->tb_flags & MSR_EE_FLAG)
1395 && mem_index == MMU_USER_IDX) {
1396 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1397 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1398 }
1399 dc->tb_flags |= DRTI_FLAG;
1400 } else if (b_bit) {
1401 LOG_DIS("rtbd ir=%x\n", dc->ir);
1402 if ((dc->tb_flags & MSR_EE_FLAG)
1403 && mem_index == MMU_USER_IDX) {
1404 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1405 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1406 }
1407 dc->tb_flags |= DRTB_FLAG;
1408 } else if (e_bit) {
1409 LOG_DIS("rted ir=%x\n", dc->ir);
1410 if ((dc->tb_flags & MSR_EE_FLAG)
1411 && mem_index == MMU_USER_IDX) {
1412 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1413 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1414 }
1415 dc->tb_flags |= DRTE_FLAG;
1416 } else
1417 LOG_DIS("rts ir=%x\n", dc->ir);
1418
1419 dc->jmp = JMP_INDIRECT;
1420 tcg_gen_movi_tl(env_btaken, 1);
1421 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
1422 }
1423
1424 static int dec_check_fpuv2(DisasContext *dc)
1425 {
1426 int r;
1427
1428 r = dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU2_MASK;
1429
1430 if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
1431 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1432 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1433 }
1434 return r;
1435 }
1436
1437 static void dec_fpu(DisasContext *dc)
1438 {
1439 unsigned int fpu_insn;
1440
1441 if ((dc->tb_flags & MSR_EE_FLAG)
1442 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1443 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU_MASK))) {
1444 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1445 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1446 return;
1447 }
1448
1449 fpu_insn = (dc->ir >> 7) & 7;
1450
1451 switch (fpu_insn) {
1452 case 0:
1453 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1454 cpu_R[dc->rb]);
1455 break;
1456
1457 case 1:
1458 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1459 cpu_R[dc->rb]);
1460 break;
1461
1462 case 2:
1463 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1464 cpu_R[dc->rb]);
1465 break;
1466
1467 case 3:
1468 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1469 cpu_R[dc->rb]);
1470 break;
1471
1472 case 4:
1473 switch ((dc->ir >> 4) & 7) {
1474 case 0:
1475 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1476 cpu_R[dc->ra], cpu_R[dc->rb]);
1477 break;
1478 case 1:
1479 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1480 cpu_R[dc->ra], cpu_R[dc->rb]);
1481 break;
1482 case 2:
1483 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1484 cpu_R[dc->ra], cpu_R[dc->rb]);
1485 break;
1486 case 3:
1487 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1488 cpu_R[dc->ra], cpu_R[dc->rb]);
1489 break;
1490 case 4:
1491 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1492 cpu_R[dc->ra], cpu_R[dc->rb]);
1493 break;
1494 case 5:
1495 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1496 cpu_R[dc->ra], cpu_R[dc->rb]);
1497 break;
1498 case 6:
1499 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1500 cpu_R[dc->ra], cpu_R[dc->rb]);
1501 break;
1502 default:
1503 qemu_log_mask(LOG_UNIMP,
1504 "unimplemented fcmp fpu_insn=%x pc=%x"
1505 " opc=%x\n",
1506 fpu_insn, dc->pc, dc->opcode);
1507 dc->abort_at_next_insn = 1;
1508 break;
1509 }
1510 break;
1511
1512 case 5:
1513 if (!dec_check_fpuv2(dc)) {
1514 return;
1515 }
1516 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1517 break;
1518
1519 case 6:
1520 if (!dec_check_fpuv2(dc)) {
1521 return;
1522 }
1523 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1524 break;
1525
1526 case 7:
1527 if (!dec_check_fpuv2(dc)) {
1528 return;
1529 }
1530 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1531 break;
1532
1533 default:
1534 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1535 " opc=%x\n",
1536 fpu_insn, dc->pc, dc->opcode);
1537 dc->abort_at_next_insn = 1;
1538 break;
1539 }
1540 }
1541
1542 static void dec_null(DisasContext *dc)
1543 {
1544 if ((dc->tb_flags & MSR_EE_FLAG)
1545 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1546 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1547 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1548 return;
1549 }
1550 qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1551 dc->abort_at_next_insn = 1;
1552 }
1553
1554 /* Insns connected to FSL or AXI stream attached devices. */
1555 static void dec_stream(DisasContext *dc)
1556 {
1557 int mem_index = cpu_mmu_index(&dc->cpu->env);
1558 TCGv_i32 t_id, t_ctrl;
1559 int ctrl;
1560
1561 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1562 dc->type_b ? "" : "d", dc->imm);
1563
1564 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1565 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1566 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1567 return;
1568 }
1569
1570 t_id = tcg_temp_new();
1571 if (dc->type_b) {
1572 tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1573 ctrl = dc->imm >> 10;
1574 } else {
1575 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1576 ctrl = dc->imm >> 5;
1577 }
1578
1579 t_ctrl = tcg_const_tl(ctrl);
1580
1581 if (dc->rd == 0) {
1582 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1583 } else {
1584 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1585 }
1586 tcg_temp_free(t_id);
1587 tcg_temp_free(t_ctrl);
1588 }
1589
1590 static struct decoder_info {
1591 struct {
1592 uint32_t bits;
1593 uint32_t mask;
1594 };
1595 void (*dec)(DisasContext *dc);
1596 } decinfo[] = {
1597 {DEC_ADD, dec_add},
1598 {DEC_SUB, dec_sub},
1599 {DEC_AND, dec_and},
1600 {DEC_XOR, dec_xor},
1601 {DEC_OR, dec_or},
1602 {DEC_BIT, dec_bit},
1603 {DEC_BARREL, dec_barrel},
1604 {DEC_LD, dec_load},
1605 {DEC_ST, dec_store},
1606 {DEC_IMM, dec_imm},
1607 {DEC_BR, dec_br},
1608 {DEC_BCC, dec_bcc},
1609 {DEC_RTS, dec_rts},
1610 {DEC_FPU, dec_fpu},
1611 {DEC_MUL, dec_mul},
1612 {DEC_DIV, dec_div},
1613 {DEC_MSR, dec_msr},
1614 {DEC_STREAM, dec_stream},
1615 {{0, 0}, dec_null}
1616 };
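/*
 * Sketch (annotation only) of how decode() below consumes this table:
 *
 *   for (i = 0; i < ARRAY_SIZE(decinfo); i++)
 *       if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
 *           decinfo[i].dec(dc);
 *           break;
 *       }
 *
 * The {{0, 0}, dec_null} sentinel matches any opcode, so unknown insns
 * always fall through to dec_null().
 */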
1617
1618 static inline void decode(DisasContext *dc, uint32_t ir)
1619 {
1620 int i;
1621
1622 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
1623 tcg_gen_debug_insn_start(dc->pc);
1624 }
1625
1626 dc->ir = ir;
1627 LOG_DIS("%8.8x\t", dc->ir);
1628
1629 if (dc->ir)
1630 dc->nr_nops = 0;
1631 else {
1632 if ((dc->tb_flags & MSR_EE_FLAG)
1633 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1634 && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1635 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1636 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1637 return;
1638 }
1639
1640 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1641 dc->nr_nops++;
1642 if (dc->nr_nops > 4) {
1643 cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
1644 }
1645 }
1646 /* Bit 2 (MSB-first numbering, i.e. bit 29 counting from the LSB) indicates a type-b insn. */
1647 dc->type_b = ir & (1 << 29);
1648
1649 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1650 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1651 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1652 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1653 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1654
1655 /* Large switch for all insns. */
1656 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1657 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1658 decinfo[i].dec(dc);
1659 break;
1660 }
1661 }
1662 }
1663
1664 static void check_breakpoint(CPUMBState *env, DisasContext *dc)
1665 {
1666 CPUState *cs = CPU(mb_env_get_cpu(env));
1667 CPUBreakpoint *bp;
1668
1669 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
1670 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
1671 if (bp->pc == dc->pc) {
1672 t_gen_raise_exception(dc, EXCP_DEBUG);
1673 dc->is_jmp = DISAS_UPDATE;
1674 }
1675 }
1676 }
1677 }
1678
1679 /* generate intermediate code for basic block 'tb'. */
1680 static inline void
1681 gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
1682 bool search_pc)
1683 {
1684 CPUState *cs = CPU(cpu);
1685 CPUMBState *env = &cpu->env;
1686 uint16_t *gen_opc_end;
1687 uint32_t pc_start;
1688 int j, lj;
1689 struct DisasContext ctx;
1690 struct DisasContext *dc = &ctx;
1691 uint32_t next_page_start, org_flags;
1692 target_ulong npc;
1693 int num_insns;
1694 int max_insns;
1695
1696 pc_start = tb->pc;
1697 dc->cpu = cpu;
1698 dc->tb = tb;
1699 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1700
1701 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
1702
1703 dc->is_jmp = DISAS_NEXT;
1704 dc->jmp = 0;
1705 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1706 if (dc->delayed_branch) {
1707 dc->jmp = JMP_INDIRECT;
1708 }
1709 dc->pc = pc_start;
1710 dc->singlestep_enabled = cs->singlestep_enabled;
1711 dc->cpustate_changed = 0;
1712 dc->abort_at_next_insn = 0;
1713 dc->nr_nops = 0;
1714
1715 if (pc_start & 3) {
1716 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1717 }
1718
1719 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1720 #if !SIM_COMPAT
1721 qemu_log("--------------\n");
1722 log_cpu_state(CPU(cpu), 0);
1723 #endif
1724 }
1725
1726 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1727 lj = -1;
1728 num_insns = 0;
1729 max_insns = tb->cflags & CF_COUNT_MASK;
1730 if (max_insns == 0)
1731 max_insns = CF_COUNT_MASK;
1732
1733 gen_tb_start();
1734 do
1735 {
1736 #if SIM_COMPAT
1737 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1738 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1739 gen_helper_debug();
1740 }
1741 #endif
1742 check_breakpoint(env, dc);
1743
1744 if (search_pc) {
1745 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1746 if (lj < j) {
1747 lj++;
1748 while (lj < j)
1749 tcg_ctx.gen_opc_instr_start[lj++] = 0;
1750 }
1751 tcg_ctx.gen_opc_pc[lj] = dc->pc;
1752 tcg_ctx.gen_opc_instr_start[lj] = 1;
1753 tcg_ctx.gen_opc_icount[lj] = num_insns;
1754 }
1755
1756 /* Pretty disas. */
1757 LOG_DIS("%8.8x:\t", dc->pc);
1758
1759 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1760 gen_io_start();
1761
1762 dc->clear_imm = 1;
1763 decode(dc, cpu_ldl_code(env, dc->pc));
1764 if (dc->clear_imm)
1765 dc->tb_flags &= ~IMM_FLAG;
1766 dc->pc += 4;
1767 num_insns++;
1768
1769 if (dc->delayed_branch) {
1770 dc->delayed_branch--;
1771 if (!dc->delayed_branch) {
1772 if (dc->tb_flags & DRTI_FLAG)
1773 do_rti(dc);
1774 if (dc->tb_flags & DRTB_FLAG)
1775 do_rtb(dc);
1776 if (dc->tb_flags & DRTE_FLAG)
1777 do_rte(dc);
1778 /* Clear the delay slot flag. */
1779 dc->tb_flags &= ~D_FLAG;
1780 /* If it is a direct jump, try direct chaining. */
1781 if (dc->jmp == JMP_INDIRECT) {
1782 eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1783 dc->is_jmp = DISAS_JUMP;
1784 } else if (dc->jmp == JMP_DIRECT) {
1785 t_sync_flags(dc);
1786 gen_goto_tb(dc, 0, dc->jmp_pc);
1787 dc->is_jmp = DISAS_TB_JUMP;
1788 } else if (dc->jmp == JMP_DIRECT_CC) {
1789 int l1;
1790
1791 t_sync_flags(dc);
1792 l1 = gen_new_label();
1793 /* Conditional jmp. */
1794 tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1795 gen_goto_tb(dc, 1, dc->pc);
1796 gen_set_label(l1);
1797 gen_goto_tb(dc, 0, dc->jmp_pc);
1798
1799 dc->is_jmp = DISAS_TB_JUMP;
1800 }
1801 break;
1802 }
1803 }
1804 if (cs->singlestep_enabled) {
1805 break;
1806 }
1807 } while (!dc->is_jmp && !dc->cpustate_changed
1808 && tcg_ctx.gen_opc_ptr < gen_opc_end
1809 && !singlestep
1810 && (dc->pc < next_page_start)
1811 && num_insns < max_insns);
1812
1813 npc = dc->pc;
1814 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1815 if (dc->tb_flags & D_FLAG) {
1816 dc->is_jmp = DISAS_UPDATE;
1817 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1818 sync_jmpstate(dc);
1819 } else
1820 npc = dc->jmp_pc;
1821 }
1822
1823 if (tb->cflags & CF_LAST_IO)
1824 gen_io_end();
1825 /* Force an update if the per-tb cpu state has changed. */
1826 if (dc->is_jmp == DISAS_NEXT
1827 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1828 dc->is_jmp = DISAS_UPDATE;
1829 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1830 }
1831 t_sync_flags(dc);
1832
1833 if (unlikely(cs->singlestep_enabled)) {
1834 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1835
1836 if (dc->is_jmp != DISAS_JUMP) {
1837 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1838 }
1839 gen_helper_raise_exception(cpu_env, tmp);
1840 tcg_temp_free_i32(tmp);
1841 } else {
1842 switch(dc->is_jmp) {
1843 case DISAS_NEXT:
1844 gen_goto_tb(dc, 1, npc);
1845 break;
1846 default:
1847 case DISAS_JUMP:
1848 case DISAS_UPDATE:
1849 /* indicate that the hash table must be used
1850 to find the next TB */
1851 tcg_gen_exit_tb(0);
1852 break;
1853 case DISAS_TB_JUMP:
1854 /* nothing more to generate */
1855 break;
1856 }
1857 }
1858 gen_tb_end(tb, num_insns);
1859 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
1860 if (search_pc) {
1861 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1862 lj++;
1863 while (lj <= j)
1864 tcg_ctx.gen_opc_instr_start[lj++] = 0;
1865 } else {
1866 tb->size = dc->pc - pc_start;
1867 tb->icount = num_insns;
1868 }
1869
1870 #ifdef DEBUG_DISAS
1871 #if !SIM_COMPAT
1872 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1873 qemu_log("\n");
1874 #if DISAS_GNU
1875 log_target_disas(env, pc_start, dc->pc - pc_start, 0);
1876 #endif
1877 qemu_log("\nisize=%d osize=%td\n",
1878 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
1879 tcg_ctx.gen_opc_buf);
1880 }
1881 #endif
1882 #endif
1883 assert(!dc->abort_at_next_insn);
1884 }
1885
1886 void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
1887 {
1888 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
1889 }
1890
1891 void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
1892 {
1893 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
1894 }
1895
1896 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1897 int flags)
1898 {
1899 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1900 CPUMBState *env = &cpu->env;
1901 int i;
1902
1903 if (!env || !f)
1904 return;
1905
1906 cpu_fprintf(f, "IN: PC=%x %s\n",
1907 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1908 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1909 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1910 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1911 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1912 env->btaken, env->btarget,
1913 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1914 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1915 (env->sregs[SR_MSR] & MSR_EIP),
1916 (env->sregs[SR_MSR] & MSR_IE));
1917
1918 for (i = 0; i < 32; i++) {
1919 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1920 if ((i + 1) % 4 == 0)
1921 cpu_fprintf(f, "\n");
1922 }
1923 cpu_fprintf(f, "\n\n");
1924 }
1925
1926 MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
1927 {
1928 MicroBlazeCPU *cpu;
1929
1930 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
1931
1932 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
1933
1934 return cpu;
1935 }
1936
1937 void mb_tcg_init(void)
1938 {
1939 int i;
1940
1941 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1942
1943 env_debug = tcg_global_mem_new(TCG_AREG0,
1944 offsetof(CPUMBState, debug),
1945 "debug0");
1946 env_iflags = tcg_global_mem_new(TCG_AREG0,
1947 offsetof(CPUMBState, iflags),
1948 "iflags");
1949 env_imm = tcg_global_mem_new(TCG_AREG0,
1950 offsetof(CPUMBState, imm),
1951 "imm");
1952 env_btarget = tcg_global_mem_new(TCG_AREG0,
1953 offsetof(CPUMBState, btarget),
1954 "btarget");
1955 env_btaken = tcg_global_mem_new(TCG_AREG0,
1956 offsetof(CPUMBState, btaken),
1957 "btaken");
1958 env_res_addr = tcg_global_mem_new(TCG_AREG0,
1959 offsetof(CPUMBState, res_addr),
1960 "res_addr");
1961 env_res_val = tcg_global_mem_new(TCG_AREG0,
1962 offsetof(CPUMBState, res_val),
1963 "res_val");
1964 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1965 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
1966 offsetof(CPUMBState, regs[i]),
1967 regnames[i]);
1968 }
1969 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1970 cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
1971 offsetof(CPUMBState, sregs[i]),
1972 special_regnames[i]);
1973 }
1974 }
1975
1976 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
1977 {
1978 env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
1979 }