target/microblaze/translate.c (mirror_qemu.git, commit "target/microblaze: Remove cpu_ear")
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "qemu/qemu-print.h"
32
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35
36
37 #define SIM_COMPAT 0
38 #define DISAS_GNU 1
39 #define DISAS_MB 1
40 #if DISAS_MB && !SIM_COMPAT
41 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
42 #else
43 # define LOG_DIS(...) do { } while (0)
44 #endif
45
46 #define D(x)
47
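/* Extract the inclusive bit field [start, end] from src, with bit 0 as the LSB. */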
48 #define EXTRACT_FIELD(src, start, end) \
49 (((src) >> start) & ((1 << (end - start + 1)) - 1))
50
51 /* is_jmp field values */
52 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
53 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
54 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
55
56 static TCGv_i32 env_debug;
57 static TCGv_i32 cpu_R[32];
58 static TCGv_i32 cpu_pc;
59 static TCGv_i32 cpu_msr;
60 static TCGv_i32 cpu_esr;
61 static TCGv_i32 env_imm;
62 static TCGv_i32 env_btaken;
63 static TCGv_i32 cpu_btarget;
64 static TCGv_i32 env_iflags;
65 static TCGv env_res_addr;
66 static TCGv_i32 env_res_val;
67
68 #include "exec/gen-icount.h"
69
70 /* This is the state at translation time. */
71 typedef struct DisasContext {
72 MicroBlazeCPU *cpu;
73 uint32_t pc;
74
75 /* Decoder. */
76 int type_b;
77 uint32_t ir;
78 uint8_t opcode;
79 uint8_t rd, ra, rb;
80 uint16_t imm;
81
82 unsigned int cpustate_changed;
83 unsigned int delayed_branch;
84 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
85 unsigned int clear_imm;
86 int is_jmp;
87
88 #define JMP_NOJMP 0 /* no branch pending */
89 #define JMP_DIRECT 1 /* unconditional branch, target known at translation time */
90 #define JMP_DIRECT_CC 2 /* conditional branch, target known at translation time */
91 #define JMP_INDIRECT 3 /* branch state carried in env_btaken / cpu_btarget */
92 unsigned int jmp;
93 uint32_t jmp_pc;
94
95 int abort_at_next_insn;
96 struct TranslationBlock *tb;
97 int singlestep_enabled;
98 } DisasContext;
99
100 static const char *regnames[] =
101 {
102 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
103 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
104 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
105 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
106 };
107
108 static inline void t_sync_flags(DisasContext *dc)
109 {
110 /* Synch the tb dependent flags between translator and runtime. */
111 if (dc->tb_flags != dc->synced_flags) {
112 tcg_gen_movi_i32(env_iflags, dc->tb_flags);
113 dc->synced_flags = dc->tb_flags;
114 }
115 }
116
117 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
118 {
119 TCGv_i32 tmp = tcg_const_i32(index);
120
121 t_sync_flags(dc);
122 tcg_gen_movi_i32(cpu_pc, dc->pc);
123 gen_helper_raise_exception(cpu_env, tmp);
124 tcg_temp_free_i32(tmp);
125 dc->is_jmp = DISAS_UPDATE;
126 }
127
128 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
129 {
130 #ifndef CONFIG_USER_ONLY
131 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
132 #else
133 return true;
134 #endif
135 }
136
137 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
138 {
139 if (use_goto_tb(dc, dest)) {
140 tcg_gen_goto_tb(n);
141 tcg_gen_movi_i32(cpu_pc, dest);
142 tcg_gen_exit_tb(dc->tb, n);
143 } else {
144 tcg_gen_movi_i32(cpu_pc, dest);
145 tcg_gen_exit_tb(NULL, 0);
146 }
147 }
148
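/* Copy the carry flag (MSR_CC, bit 31) into d. */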
149 static void read_carry(DisasContext *dc, TCGv_i32 d)
150 {
151 tcg_gen_shri_i32(d, cpu_msr, 31);
152 }
153
154 /*
155 * write_carry sets the carry bits in MSR based on bit 0 of v.
156 * v[31:1] are ignored.
157 */
158 static void write_carry(DisasContext *dc, TCGv_i32 v)
159 {
160 /* Deposit bit 0 into MSR_C and the alias MSR_CC. */
161 tcg_gen_deposit_i32(cpu_msr, cpu_msr, v, 2, 1);
162 tcg_gen_deposit_i32(cpu_msr, cpu_msr, v, 31, 1);
163 }
164
165 static void write_carryi(DisasContext *dc, bool carry)
166 {
167 TCGv_i32 t0 = tcg_temp_new_i32();
168 tcg_gen_movi_i32(t0, carry);
169 write_carry(dc, t0);
170 tcg_temp_free_i32(t0);
171 }
172
173 /*
174 * Returns true if the insn is an illegal operation.
175 * If exceptions are enabled, an exception is raised.
176 */
177 static bool trap_illegal(DisasContext *dc, bool cond)
178 {
179 if (cond && (dc->tb_flags & MSR_EE_FLAG)
180 && dc->cpu->cfg.illegal_opcode_exception) {
181 tcg_gen_movi_i32(cpu_esr, ESR_EC_ILLEGAL_OP);
182 t_gen_raise_exception(dc, EXCP_HW_EXCP);
183 }
184 return cond;
185 }
186
187 /*
188 * Returns true if the insn is illegal in userspace.
189 * If exceptions are enabled, an exception is raised.
190 */
191 static bool trap_userspace(DisasContext *dc, bool cond)
192 {
193 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
194 bool cond_user = cond && mem_index == MMU_USER_IDX;
195
196 if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
197 tcg_gen_movi_i32(cpu_esr, ESR_EC_PRIVINSN);
198 t_gen_raise_exception(dc, EXCP_HW_EXCP);
199 }
200 return cond_user;
201 }
202
203 /* True if ALU operand b is a small immediate that may deserve
204 faster treatment. */
205 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
206 {
207 /* Immediate insn without the imm prefix? */
208 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
209 }
210
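/* Return a pointer to the second ALU operand: the immediate materialised in
   env_imm for type B insns, otherwise register rb. */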
211 static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
212 {
213 if (dc->type_b) {
214 if (dc->tb_flags & IMM_FLAG)
215 tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
216 else
217 tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
218 return &env_imm;
219 } else
220 return &cpu_R[dc->rb];
221 }
222
223 static void dec_add(DisasContext *dc)
224 {
225 unsigned int k, c;
226 TCGv_i32 cf;
227
228 k = dc->opcode & 4;
229 c = dc->opcode & 2;
230
231 LOG_DIS("add%s%s%s r%d r%d r%d\n",
232 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
233 dc->rd, dc->ra, dc->rb);
234
235 /* Take care of the easy cases first. */
236 if (k) {
237 /* k - keep carry, no need to update MSR. */
238 /* If rd == r0, it's a nop. */
239 if (dc->rd) {
240 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
241
242 if (c) {
243 /* c - Add carry into the result. */
244 cf = tcg_temp_new_i32();
245
246 read_carry(dc, cf);
247 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
248 tcg_temp_free_i32(cf);
249 }
250 }
251 return;
252 }
253
254 /* From now on, we can assume k is zero. So we need to update MSR. */
255 /* Extract carry. */
256 cf = tcg_temp_new_i32();
257 if (c) {
258 read_carry(dc, cf);
259 } else {
260 tcg_gen_movi_i32(cf, 0);
261 }
262
263 if (dc->rd) {
264 TCGv_i32 ncf = tcg_temp_new_i32();
265 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
266 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
267 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
268 write_carry(dc, ncf);
269 tcg_temp_free_i32(ncf);
270 } else {
271 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
272 write_carry(dc, cf);
273 }
274 tcg_temp_free_i32(cf);
275 }
276
277 static void dec_sub(DisasContext *dc)
278 {
279 unsigned int u, cmp, k, c;
280 TCGv_i32 cf, na;
281
282 u = dc->imm & 2;
283 k = dc->opcode & 4;
284 c = dc->opcode & 2;
285 cmp = (dc->imm & 1) && (!dc->type_b) && k;
286
287 if (cmp) {
288 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
289 if (dc->rd) {
290 if (u)
291 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
292 else
293 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
294 }
295 return;
296 }
297
298 LOG_DIS("sub%s%s r%d, r%d r%d\n",
299 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
300
301 /* Take care of the easy cases first. */
302 if (k) {
303 /* k - keep carry, no need to update MSR. */
304 /* If rd == r0, it's a nop. */
305 if (dc->rd) {
306 tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
307
308 if (c) {
309 /* c - Add carry into the result. */
310 cf = tcg_temp_new_i32();
311
312 read_carry(dc, cf);
313 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
314 tcg_temp_free_i32(cf);
315 }
316 }
317 return;
318 }
319
320 /* From now on, we can assume k is zero. So we need to update MSR. */
321 /* Extract carry. And complement a into na. */
322 cf = tcg_temp_new_i32();
323 na = tcg_temp_new_i32();
324 if (c) {
325 read_carry(dc, cf);
326 } else {
327 tcg_gen_movi_i32(cf, 1);
328 }
329
330 /* d = b + ~a + c. carry defaults to 1. */
331 tcg_gen_not_i32(na, cpu_R[dc->ra]);
332
333 if (dc->rd) {
334 TCGv_i32 ncf = tcg_temp_new_i32();
335 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
336 tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
337 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
338 write_carry(dc, ncf);
339 tcg_temp_free_i32(ncf);
340 } else {
341 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
342 write_carry(dc, cf);
343 }
344 tcg_temp_free_i32(cf);
345 tcg_temp_free_i32(na);
346 }
347
348 static void dec_pattern(DisasContext *dc)
349 {
350 unsigned int mode;
351
352 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
353 return;
354 }
355
356 mode = dc->opcode & 3;
357 switch (mode) {
358 case 0:
359 /* pcmpbf. */
360 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
361 if (dc->rd)
362 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
363 break;
364 case 2:
365 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
366 if (dc->rd) {
367 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
368 cpu_R[dc->ra], cpu_R[dc->rb]);
369 }
370 break;
371 case 3:
372 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
373 if (dc->rd) {
374 tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
375 cpu_R[dc->ra], cpu_R[dc->rb]);
376 }
377 break;
378 default:
379 cpu_abort(CPU(dc->cpu),
380 "unsupported pattern insn opcode=%x\n", dc->opcode);
381 break;
382 }
383 }
384
385 static void dec_and(DisasContext *dc)
386 {
387 unsigned int not;
388
389 if (!dc->type_b && (dc->imm & (1 << 10))) {
390 dec_pattern(dc);
391 return;
392 }
393
394 not = dc->opcode & (1 << 1);
395 LOG_DIS("and%s\n", not ? "n" : "");
396
397 if (!dc->rd)
398 return;
399
400 if (not) {
401 tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
402 } else
403 tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
404 }
405
406 static void dec_or(DisasContext *dc)
407 {
408 if (!dc->type_b && (dc->imm & (1 << 10))) {
409 dec_pattern(dc);
410 return;
411 }
412
413 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
414 if (dc->rd)
415 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
416 }
417
418 static void dec_xor(DisasContext *dc)
419 {
420 if (!dc->type_b && (dc->imm & (1 << 10))) {
421 dec_pattern(dc);
422 return;
423 }
424
425 LOG_DIS("xor r%d\n", dc->rd);
426 if (dc->rd)
427 tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
428 }
429
430 static inline void msr_read(DisasContext *dc, TCGv_i32 d)
431 {
432 tcg_gen_mov_i32(d, cpu_msr);
433 }
434
435 static inline void msr_write(DisasContext *dc, TCGv_i32 v)
436 {
437 dc->cpustate_changed = 1;
438 /* PVR bit is not writable, and is never set. */
439 tcg_gen_andi_i32(cpu_msr, v, ~MSR_PVR);
440 }
441
442 static void dec_msr(DisasContext *dc)
443 {
444 CPUState *cs = CPU(dc->cpu);
445 TCGv_i32 t0, t1;
446 unsigned int sr, rn;
447 bool to, clrset, extended = false;
448
449 sr = extract32(dc->imm, 0, 14);
450 to = extract32(dc->imm, 14, 1);
451 clrset = extract32(dc->imm, 15, 1) == 0;
452 dc->type_b = 1;
453 if (to) {
454 dc->cpustate_changed = 1;
455 }
456
457 /* Extended MSRs are only available if addr_size > 32. */
458 if (dc->cpu->cfg.addr_size > 32) {
459 /* The E-bit is encoded differently for To/From MSR. */
460 static const unsigned int e_bit[] = { 19, 24 };
461
462 extended = extract32(dc->imm, e_bit[to], 1);
463 }
464
465 /* msrclr and msrset. */
466 if (clrset) {
467 bool clr = extract32(dc->ir, 16, 1);
468
469 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
470 dc->rd, dc->imm);
471
472 if (!dc->cpu->cfg.use_msr_instr) {
473 /* nop??? */
474 return;
475 }
476
477 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
478 return;
479 }
480
481 if (dc->rd)
482 msr_read(dc, cpu_R[dc->rd]);
483
484 t0 = tcg_temp_new_i32();
485 t1 = tcg_temp_new_i32();
486 msr_read(dc, t0);
487 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
488
489 if (clr) {
490 tcg_gen_not_i32(t1, t1);
491 tcg_gen_and_i32(t0, t0, t1);
492 } else
493 tcg_gen_or_i32(t0, t0, t1);
494 msr_write(dc, t0);
495 tcg_temp_free_i32(t0);
496 tcg_temp_free_i32(t1);
497 tcg_gen_movi_i32(cpu_pc, dc->pc + 4);
498 dc->is_jmp = DISAS_UPDATE;
499 return;
500 }
501
502 if (trap_userspace(dc, to)) {
503 return;
504 }
505
506 #if !defined(CONFIG_USER_ONLY)
507 /* Catch read/writes to the mmu block. */
508 if ((sr & ~0xff) == 0x1000) {
509 TCGv_i32 tmp_ext = tcg_const_i32(extended);
510 TCGv_i32 tmp_sr;
511
512 sr &= 7;
513 tmp_sr = tcg_const_i32(sr);
514 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
515 if (to) {
516 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
517 } else {
518 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
519 }
520 tcg_temp_free_i32(tmp_sr);
521 tcg_temp_free_i32(tmp_ext);
522 return;
523 }
524 #endif
525
526 if (to) {
527 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
528 switch (sr) {
529 case SR_PC:
530 break;
531 case SR_MSR:
532 msr_write(dc, cpu_R[dc->ra]);
533 break;
534 case SR_EAR:
535 {
536 TCGv_i64 t64 = tcg_temp_new_i64();
537 tcg_gen_extu_i32_i64(t64, cpu_R[dc->ra]);
538 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUMBState, ear));
539 tcg_temp_free_i64(t64);
540 }
541 break;
542 case SR_ESR:
543 tcg_gen_mov_i32(cpu_esr, cpu_R[dc->ra]);
544 break;
545 case SR_FSR:
546 tcg_gen_st_i32(cpu_R[dc->ra],
547 cpu_env, offsetof(CPUMBState, fsr));
548 break;
549 case SR_BTR:
550 tcg_gen_st_i32(cpu_R[dc->ra],
551 cpu_env, offsetof(CPUMBState, btr));
552 break;
553 case SR_EDR:
554 tcg_gen_st_i32(cpu_R[dc->ra],
555 cpu_env, offsetof(CPUMBState, edr));
556 break;
557 case 0x800:
558 tcg_gen_st_i32(cpu_R[dc->ra],
559 cpu_env, offsetof(CPUMBState, slr));
560 break;
561 case 0x802:
562 tcg_gen_st_i32(cpu_R[dc->ra],
563 cpu_env, offsetof(CPUMBState, shr));
564 break;
565 default:
566 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
567 break;
568 }
569 } else {
570 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
571
572 switch (sr) {
573 case SR_PC:
574 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
575 break;
576 case SR_MSR:
577 msr_read(dc, cpu_R[dc->rd]);
578 break;
579 case SR_EAR:
580 {
581 TCGv_i64 t64 = tcg_temp_new_i64();
582 tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
583 if (extended) {
584 tcg_gen_extrh_i64_i32(cpu_R[dc->rd], t64);
585 } else {
586 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], t64);
587 }
588 tcg_temp_free_i64(t64);
589 }
590 break;
591 case SR_ESR:
592 tcg_gen_mov_i32(cpu_R[dc->rd], cpu_esr);
593 break;
594 case SR_FSR:
595 tcg_gen_ld_i32(cpu_R[dc->rd],
596 cpu_env, offsetof(CPUMBState, fsr));
597 break;
598 case SR_BTR:
599 tcg_gen_ld_i32(cpu_R[dc->rd],
600 cpu_env, offsetof(CPUMBState, btr));
601 break;
602 case SR_EDR:
603 tcg_gen_ld_i32(cpu_R[dc->rd],
604 cpu_env, offsetof(CPUMBState, edr));
605 break;
606 case 0x800:
607 tcg_gen_ld_i32(cpu_R[dc->rd],
608 cpu_env, offsetof(CPUMBState, slr));
609 break;
610 case 0x802:
611 tcg_gen_ld_i32(cpu_R[dc->rd],
612 cpu_env, offsetof(CPUMBState, shr));
613 break;
614 case 0x2000 ... 0x200c:
615 rn = sr & 0xf;
616 tcg_gen_ld_i32(cpu_R[dc->rd],
617 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
618 break;
619 default:
620 cpu_abort(cs, "unknown mfs reg %x\n", sr);
621 break;
622 }
623 }
624
625 if (dc->rd == 0) {
626 tcg_gen_movi_i32(cpu_R[0], 0);
627 }
628 }
629
630 /* Multiplier unit. */
631 static void dec_mul(DisasContext *dc)
632 {
633 TCGv_i32 tmp;
634 unsigned int subcode;
635
636 if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
637 return;
638 }
639
640 subcode = dc->imm & 3;
641
642 if (dc->type_b) {
643 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
644 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
645 return;
646 }
647
648 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
649 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
650 /* nop??? */
651 }
652
653 tmp = tcg_temp_new_i32();
654 switch (subcode) {
655 case 0:
656 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
657 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
658 break;
659 case 1:
660 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
661 tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
662 cpu_R[dc->ra], cpu_R[dc->rb]);
663 break;
664 case 2:
665 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
666 tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
667 cpu_R[dc->ra], cpu_R[dc->rb]);
668 break;
669 case 3:
670 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
671 tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
672 break;
673 default:
674 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
675 break;
676 }
677 tcg_temp_free_i32(tmp);
678 }
679
680 /* Div unit. */
681 static void dec_div(DisasContext *dc)
682 {
683 unsigned int u;
684
685 u = dc->imm & 2;
686 LOG_DIS("div\n");
687
688 if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
689 return;
690 }
691
692 if (u)
693 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
694 cpu_R[dc->ra]);
695 else
696 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
697 cpu_R[dc->ra]);
698 if (!dc->rd)
699 tcg_gen_movi_i32(cpu_R[dc->rd], 0);
700 }
701
702 static void dec_barrel(DisasContext *dc)
703 {
704 TCGv_i32 t0;
705 unsigned int imm_w, imm_s;
706 bool s, t, e = false, i = false;
707
708 if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
709 return;
710 }
711
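/* Decode the sub-op flags: e selects extract (bsefi), i selects insert (bsifi),
   s selects a left shift, and t selects arithmetic (vs logical) right shifts. */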
712 if (dc->type_b) {
713 /* Insert and extract are only available in immediate mode. */
714 i = extract32(dc->imm, 15, 1);
715 e = extract32(dc->imm, 14, 1);
716 }
717 s = extract32(dc->imm, 10, 1);
718 t = extract32(dc->imm, 9, 1);
719 imm_w = extract32(dc->imm, 6, 5);
720 imm_s = extract32(dc->imm, 0, 5);
721
722 LOG_DIS("bs%s%s%s r%d r%d r%d\n",
723 e ? "e" : "",
724 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
725
726 if (e) {
727 if (imm_w + imm_s > 32 || imm_w == 0) {
728 /* These inputs have undefined behavior. */
729 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
730 imm_w, imm_s);
731 } else {
732 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
733 }
734 } else if (i) {
735 int width = imm_w - imm_s + 1;
736
737 if (imm_w < imm_s) {
738 /* These inputs have undefined behavior. */
739 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
740 imm_w, imm_s);
741 } else {
742 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
743 imm_s, width);
744 }
745 } else {
746 t0 = tcg_temp_new_i32();
747
748 tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
749 tcg_gen_andi_i32(t0, t0, 31);
750
751 if (s) {
752 tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
753 } else {
754 if (t) {
755 tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
756 } else {
757 tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
758 }
759 }
760 tcg_temp_free_i32(t0);
761 }
762 }
763
764 static void dec_bit(DisasContext *dc)
765 {
766 CPUState *cs = CPU(dc->cpu);
767 TCGv_i32 t0;
768 unsigned int op;
769
770 op = dc->ir & ((1 << 9) - 1);
771 switch (op) {
772 case 0x21:
773 /* src. */
774 t0 = tcg_temp_new_i32();
775
776 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
777 tcg_gen_andi_i32(t0, cpu_msr, MSR_CC);
778 write_carry(dc, cpu_R[dc->ra]);
779 if (dc->rd) {
780 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
781 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
782 }
783 tcg_temp_free_i32(t0);
784 break;
785
786 case 0x1:
787 case 0x41:
788 /* srl. */
789 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
790
791 /* Update carry. Note that write carry only looks at the LSB. */
792 write_carry(dc, cpu_R[dc->ra]);
793 if (dc->rd) {
794 if (op == 0x41)
795 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
796 else
797 tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
798 }
799 break;
800 case 0x60:
801 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
802 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
803 break;
804 case 0x61:
805 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
806 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
807 break;
808 case 0x64:
809 case 0x66:
810 case 0x74:
811 case 0x76:
812 /* wdc. */
813 LOG_DIS("wdc r%d\n", dc->ra);
814 trap_userspace(dc, true);
815 break;
816 case 0x68:
817 /* wic. */
818 LOG_DIS("wic r%d\n", dc->ra);
819 trap_userspace(dc, true);
820 break;
821 case 0xe0:
822 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
823 return;
824 }
825 if (dc->cpu->cfg.use_pcmp_instr) {
826 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
827 }
828 break;
829 case 0x1e0:
830 /* swapb */
831 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
832 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
833 break;
834 case 0x1e2:
834 /* swaph */
836 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
837 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
838 break;
839 default:
840 cpu_abort(cs, "unknown bit pc=%x op=%x rd=%d ra=%d rb=%d\n",
841 dc->pc, op, dc->rd, dc->ra, dc->rb);
842 break;
843 }
844 }
845
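/* If a direct branch is pending, convert it to the indirect form so that
   env_btaken and cpu_btarget reflect the branch state (e.g. for faults
   taken in delay slots). */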
846 static inline void sync_jmpstate(DisasContext *dc)
847 {
848 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
849 if (dc->jmp == JMP_DIRECT) {
850 tcg_gen_movi_i32(env_btaken, 1);
851 }
852 dc->jmp = JMP_INDIRECT;
853 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
854 }
855 }
856
857 static void dec_imm(DisasContext *dc)
858 {
859 LOG_DIS("imm %x\n", dc->imm << 16);
860 tcg_gen_movi_i32(env_imm, (dc->imm << 16));
861 dc->tb_flags |= IMM_FLAG;
862 dc->clear_imm = 0;
863 }
864
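/* Compute the load/store address into t. For the extended-address (ea)
   register form, ra:rb form a wide address with ra supplying the high bits;
   r1-based accesses are checked when stack protection is configured. */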
865 static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
866 {
867 bool extimm = dc->tb_flags & IMM_FLAG;
868 /* Should be set to true if r1 is used by loadstores. */
869 bool stackprot = false;
870 TCGv_i32 t32;
871
872 /* All load/stores use ra. */
873 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
874 stackprot = true;
875 }
876
877 /* Treat the common cases first. */
878 if (!dc->type_b) {
879 if (ea) {
880 int addr_size = dc->cpu->cfg.addr_size;
881
882 if (addr_size == 32) {
883 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
884 return;
885 }
886
887 tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
888 if (addr_size < 64) {
889 /* Mask off out of range bits. */
890 tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
891 }
892 return;
893 }
894
895 /* If any of the regs is r0, set t to the value of the other reg. */
896 if (dc->ra == 0) {
897 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
898 return;
899 } else if (dc->rb == 0) {
900 tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
901 return;
902 }
903
904 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
905 stackprot = true;
906 }
907
908 t32 = tcg_temp_new_i32();
909 tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
910 tcg_gen_extu_i32_tl(t, t32);
911 tcg_temp_free_i32(t32);
912
913 if (stackprot) {
914 gen_helper_stackprot(cpu_env, t);
915 }
916 return;
917 }
918 /* Immediate. */
919 t32 = tcg_temp_new_i32();
920 if (!extimm) {
921 tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
922 } else {
923 tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
924 }
925 tcg_gen_extu_i32_tl(t, t32);
926 tcg_temp_free_i32(t32);
927
928 if (stackprot) {
929 gen_helper_stackprot(cpu_env, t);
930 }
931 return;
932 }
933
934 static void dec_load(DisasContext *dc)
935 {
936 TCGv_i32 v;
937 TCGv addr;
938 unsigned int size;
939 bool rev = false, ex = false, ea = false;
940 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
941 MemOp mop;
942
943 mop = dc->opcode & 3;
944 size = 1 << mop;
945 if (!dc->type_b) {
946 ea = extract32(dc->ir, 7, 1);
947 rev = extract32(dc->ir, 9, 1);
948 ex = extract32(dc->ir, 10, 1);
949 }
950 mop |= MO_TE;
951 if (rev) {
952 mop ^= MO_BSWAP;
953 }
954
955 if (trap_illegal(dc, size > 4)) {
956 return;
957 }
958
959 if (trap_userspace(dc, ea)) {
960 return;
961 }
962
963 LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
964 ex ? "x" : "",
965 ea ? "ea" : "");
966
967 t_sync_flags(dc);
968 addr = tcg_temp_new();
969 compute_ldst_addr(dc, ea, addr);
970 /* Extended addressing bypasses the MMU. */
971 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
972
973 /*
974 * When doing reverse accesses we need to do two things.
975 *
976 * 1. Reverse the address wrt endianness.
977 * 2. Byteswap the data lanes on the way back into the CPU core.
978 */
979 if (rev && size != 4) {
980 /* Endian reverse the address. t is addr. */
981 switch (size) {
982 case 1:
983 {
984 tcg_gen_xori_tl(addr, addr, 3);
985 break;
986 }
987
988 case 2:
989 /* 00 -> 10
990 10 -> 00. */
991 tcg_gen_xori_tl(addr, addr, 2);
992 break;
993 default:
994 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
995 break;
996 }
997 }
998
999 /* lwx does not throw unaligned access errors, so force alignment */
1000 if (ex) {
1001 tcg_gen_andi_tl(addr, addr, ~3);
1002 }
1003
1004 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1005 sync_jmpstate(dc);
1006
1007 /* Verify alignment if needed. */
1008 /*
1009 * Microblaze gives MMU faults priority over faults due to
1010 * unaligned addresses. That's why we speculatively do the load
1011 * into v. If the load succeeds, we verify alignment of the
1012 * address and if that succeeds we write into the destination reg.
1013 */
1014 v = tcg_temp_new_i32();
1015 tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
1016
1017 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1018 TCGv_i32 t0 = tcg_const_i32(0);
1019 TCGv_i32 treg = tcg_const_i32(dc->rd);
1020 TCGv_i32 tsize = tcg_const_i32(size - 1);
1021
1022 tcg_gen_movi_i32(cpu_pc, dc->pc);
1023 gen_helper_memalign(cpu_env, addr, treg, t0, tsize);
1024
1025 tcg_temp_free_i32(t0);
1026 tcg_temp_free_i32(treg);
1027 tcg_temp_free_i32(tsize);
1028 }
1029
1030 if (ex) {
1031 tcg_gen_mov_tl(env_res_addr, addr);
1032 tcg_gen_mov_i32(env_res_val, v);
1033 }
1034 if (dc->rd) {
1035 tcg_gen_mov_i32(cpu_R[dc->rd], v);
1036 }
1037 tcg_temp_free_i32(v);
1038
1039 if (ex) { /* lwx */
1040 /* no support for AXI exclusive so always clear C */
1041 write_carryi(dc, 0);
1042 }
1043
1044 tcg_temp_free(addr);
1045 }
1046
1047 static void dec_store(DisasContext *dc)
1048 {
1049 TCGv addr;
1050 TCGLabel *swx_skip = NULL;
1051 unsigned int size;
1052 bool rev = false, ex = false, ea = false;
1053 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1054 MemOp mop;
1055
1056 mop = dc->opcode & 3;
1057 size = 1 << mop;
1058 if (!dc->type_b) {
1059 ea = extract32(dc->ir, 7, 1);
1060 rev = extract32(dc->ir, 9, 1);
1061 ex = extract32(dc->ir, 10, 1);
1062 }
1063 mop |= MO_TE;
1064 if (rev) {
1065 mop ^= MO_BSWAP;
1066 }
1067
1068 if (trap_illegal(dc, size > 4)) {
1069 return;
1070 }
1071
1072 trap_userspace(dc, ea);
1073
1074 LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1075 ex ? "x" : "",
1076 ea ? "ea" : "");
1077 t_sync_flags(dc);
1078 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1079 sync_jmpstate(dc);
1080 /* SWX needs a temp_local. */
1081 addr = ex ? tcg_temp_local_new() : tcg_temp_new();
1082 compute_ldst_addr(dc, ea, addr);
1083 /* Extended addressing bypasses the MMU. */
1084 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
1085
1086 if (ex) { /* swx */
1087 TCGv_i32 tval;
1088
1089 /* swx does not throw unaligned access errors, so force alignment */
1090 tcg_gen_andi_tl(addr, addr, ~3);
1091
1092 write_carryi(dc, 1);
1093 swx_skip = gen_new_label();
1094 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);
1095
1096 /*
1097 * Compare the value loaded at lwx with current contents of
1098 * the reserved location.
1099 */
1100 tval = tcg_temp_new_i32();
1101
1102 tcg_gen_atomic_cmpxchg_i32(tval, addr, env_res_val,
1103 cpu_R[dc->rd], mem_index,
1104 mop);
1105
1106 tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
1107 write_carryi(dc, 0);
1108 tcg_temp_free_i32(tval);
1109 }
1110
1111 if (rev && size != 4) {
1112 /* Endian reverse the address. t is addr. */
1113 switch (size) {
1114 case 1:
1115 {
1116 tcg_gen_xori_tl(addr, addr, 3);
1117 break;
1118 }
1119
1120 case 2:
1121 /* 00 -> 10
1122 10 -> 00. */
1123 /* Force addr into the temp. */
1124 tcg_gen_xori_tl(addr, addr, 2);
1125 break;
1126 default:
1127 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1128 break;
1129 }
1130 }
1131
1132 if (!ex) {
1133 tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
1134 }
1135
1136 /* Verify alignment if needed. */
1137 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1138 TCGv_i32 t1 = tcg_const_i32(1);
1139 TCGv_i32 treg = tcg_const_i32(dc->rd);
1140 TCGv_i32 tsize = tcg_const_i32(size - 1);
1141
1142 tcg_gen_movi_i32(cpu_pc, dc->pc);
1143 /* FIXME: if the alignment is wrong, we should restore the value
1144 * in memory. One possible way to achieve this is to probe
1145 * the MMU prior to the memaccess, that way we could put
1146 * the alignment checks in between the probe and the mem
1147 * access.
1148 */
1149 gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
1150
1151 tcg_temp_free_i32(t1);
1152 tcg_temp_free_i32(treg);
1153 tcg_temp_free_i32(tsize);
1154 }
1155
1156 if (ex) {
1157 gen_set_label(swx_skip);
1158 }
1159
1160 tcg_temp_free(addr);
1161 }
1162
1163 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1164 TCGv_i32 d, TCGv_i32 a)
1165 {
1166 static const int mb_to_tcg_cc[] = {
1167 [CC_EQ] = TCG_COND_EQ,
1168 [CC_NE] = TCG_COND_NE,
1169 [CC_LT] = TCG_COND_LT,
1170 [CC_LE] = TCG_COND_LE,
1171 [CC_GE] = TCG_COND_GE,
1172 [CC_GT] = TCG_COND_GT,
1173 };
1174
1175 switch (cc) {
1176 case CC_EQ:
1177 case CC_NE:
1178 case CC_LT:
1179 case CC_LE:
1180 case CC_GE:
1181 case CC_GT:
1182 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
1183 break;
1184 default:
1185 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1186 break;
1187 }
1188 }
1189
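/* Set pc to pc_true if the branch was taken (env_btaken != 0), else pc_false. */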
1190 static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
1191 {
1192 TCGv_i32 zero = tcg_const_i32(0);
1193
1194 tcg_gen_movcond_i32(TCG_COND_NE, cpu_pc,
1195 env_btaken, zero,
1196 pc_true, pc_false);
1197
1198 tcg_temp_free_i32(zero);
1199 }
1200
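/* Arm the branch delay slot: the following insn executes in the slot, and
   'bimm' records whether the branch itself sits under an imm prefix. */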
1201 static void dec_setup_dslot(DisasContext *dc)
1202 {
1203 TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));
1204
1205 dc->delayed_branch = 2;
1206 dc->tb_flags |= D_FLAG;
1207
1208 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
1209 tcg_temp_free_i32(tmp);
1210 }
1211
1212 static void dec_bcc(DisasContext *dc)
1213 {
1214 unsigned int cc;
1215 unsigned int dslot;
1216
1217 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1218 dslot = dc->ir & (1 << 25);
1219 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1220
1221 dc->delayed_branch = 1;
1222 if (dslot) {
1223 dec_setup_dslot(dc);
1224 }
1225
1226 if (dec_alu_op_b_is_small_imm(dc)) {
1227 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1228
1229 tcg_gen_movi_i32(cpu_btarget, dc->pc + offset);
1230 dc->jmp = JMP_DIRECT_CC;
1231 dc->jmp_pc = dc->pc + offset;
1232 } else {
1233 dc->jmp = JMP_INDIRECT;
1234 tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
1235 }
1236 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
1237 }
1238
1239 static void dec_br(DisasContext *dc)
1240 {
1241 unsigned int dslot, link, abs, mbar;
1242
1243 dslot = dc->ir & (1 << 20);
1244 abs = dc->ir & (1 << 19);
1245 link = dc->ir & (1 << 18);
1246
1247 /* Memory barrier. */
1248 mbar = (dc->ir >> 16) & 31;
1249 if (mbar == 2 && dc->imm == 4) {
1250 uint16_t mbar_imm = dc->rd;
1251
1252 LOG_DIS("mbar %d\n", mbar_imm);
1253
1254 /* Data access memory barrier. */
1255 if ((mbar_imm & 2) == 0) {
1256 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1257 }
1258
1259 /* mbar IMM & 16 decodes to sleep. */
1260 if (mbar_imm & 16) {
1261 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1262 TCGv_i32 tmp_1 = tcg_const_i32(1);
1263
1264 LOG_DIS("sleep\n");
1265
1266 if (trap_userspace(dc, true)) {
1267 /* Sleep is a privileged instruction. */
1268 return;
1269 }
1270
1271 t_sync_flags(dc);
1272 tcg_gen_st_i32(tmp_1, cpu_env,
1273 -offsetof(MicroBlazeCPU, env)
1274 +offsetof(CPUState, halted));
1275 tcg_gen_movi_i32(cpu_pc, dc->pc + 4);
1276 gen_helper_raise_exception(cpu_env, tmp_hlt);
1277 tcg_temp_free_i32(tmp_hlt);
1278 tcg_temp_free_i32(tmp_1);
1279 return;
1280 }
1281 /* Break the TB. */
1282 dc->cpustate_changed = 1;
1283 return;
1284 }
1285
1286 LOG_DIS("br%s%s%s%s imm=%x\n",
1287 abs ? "a" : "", link ? "l" : "",
1288 dc->type_b ? "i" : "", dslot ? "d" : "",
1289 dc->imm);
1290
1291 dc->delayed_branch = 1;
1292 if (dslot) {
1293 dec_setup_dslot(dc);
1294 }
1295 if (link && dc->rd)
1296 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
1297
1298 dc->jmp = JMP_INDIRECT;
1299 if (abs) {
1300 tcg_gen_movi_i32(env_btaken, 1);
1301 tcg_gen_mov_i32(cpu_btarget, *(dec_alu_op_b(dc)));
1302 if (link && !dslot) {
1303 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1304 t_gen_raise_exception(dc, EXCP_BREAK);
1305 if (dc->imm == 0) {
1306 if (trap_userspace(dc, true)) {
1307 return;
1308 }
1309
1310 t_gen_raise_exception(dc, EXCP_DEBUG);
1311 }
1312 }
1313 } else {
1314 if (dec_alu_op_b_is_small_imm(dc)) {
1315 dc->jmp = JMP_DIRECT;
1316 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1317 } else {
1318 tcg_gen_movi_i32(env_btaken, 1);
1319 tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
1320 }
1321 }
1322 }
1323
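/* The rtid/rtbd/rted sequences below restore MSR_UM/MSR_VM from their saved
   copies (UMS/VMS, one bit to the left) and adjust IE, BIP, EE and EIP as
   appropriate for each return type. */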
1324 static inline void do_rti(DisasContext *dc)
1325 {
1326 TCGv_i32 t0, t1;
1327 t0 = tcg_temp_new_i32();
1328 t1 = tcg_temp_new_i32();
1329 tcg_gen_mov_i32(t1, cpu_msr);
1330 tcg_gen_shri_i32(t0, t1, 1);
1331 tcg_gen_ori_i32(t1, t1, MSR_IE);
1332 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1333
1334 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1335 tcg_gen_or_i32(t1, t1, t0);
1336 msr_write(dc, t1);
1337 tcg_temp_free_i32(t1);
1338 tcg_temp_free_i32(t0);
1339 dc->tb_flags &= ~DRTI_FLAG;
1340 }
1341
1342 static inline void do_rtb(DisasContext *dc)
1343 {
1344 TCGv_i32 t0, t1;
1345 t0 = tcg_temp_new_i32();
1346 t1 = tcg_temp_new_i32();
1347 tcg_gen_mov_i32(t1, cpu_msr);
1348 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
1349 tcg_gen_shri_i32(t0, t1, 1);
1350 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1351
1352 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1353 tcg_gen_or_i32(t1, t1, t0);
1354 msr_write(dc, t1);
1355 tcg_temp_free_i32(t1);
1356 tcg_temp_free_i32(t0);
1357 dc->tb_flags &= ~DRTB_FLAG;
1358 }
1359
1360 static inline void do_rte(DisasContext *dc)
1361 {
1362 TCGv_i32 t0, t1;
1363 t0 = tcg_temp_new_i32();
1364 t1 = tcg_temp_new_i32();
1365
1366 tcg_gen_mov_i32(t1, cpu_msr);
1367 tcg_gen_ori_i32(t1, t1, MSR_EE);
1368 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1369 tcg_gen_shri_i32(t0, t1, 1);
1370 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1371
1372 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1373 tcg_gen_or_i32(t1, t1, t0);
1374 msr_write(dc, t1);
1375 tcg_temp_free_i32(t1);
1376 tcg_temp_free_i32(t0);
1377 dc->tb_flags &= ~DRTE_FLAG;
1378 }
1379
1380 static void dec_rts(DisasContext *dc)
1381 {
1382 unsigned int b_bit, i_bit, e_bit;
1383
1384 i_bit = dc->ir & (1 << 21);
1385 b_bit = dc->ir & (1 << 22);
1386 e_bit = dc->ir & (1 << 23);
1387
1388 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1389 return;
1390 }
1391
1392 dec_setup_dslot(dc);
1393
1394 if (i_bit) {
1395 LOG_DIS("rtid ir=%x\n", dc->ir);
1396 dc->tb_flags |= DRTI_FLAG;
1397 } else if (b_bit) {
1398 LOG_DIS("rtbd ir=%x\n", dc->ir);
1399 dc->tb_flags |= DRTB_FLAG;
1400 } else if (e_bit) {
1401 LOG_DIS("rted ir=%x\n", dc->ir);
1402 dc->tb_flags |= DRTE_FLAG;
1403 } else
1404 LOG_DIS("rts ir=%x\n", dc->ir);
1405
1406 dc->jmp = JMP_INDIRECT;
1407 tcg_gen_movi_i32(env_btaken, 1);
1408 tcg_gen_add_i32(cpu_btarget, cpu_R[dc->ra], *dec_alu_op_b(dc));
1409 }
1410
1411 static int dec_check_fpuv2(DisasContext *dc)
1412 {
1413 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1414 tcg_gen_movi_i32(cpu_esr, ESR_EC_FPU);
1415 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1416 }
1417 return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
1418 }
1419
1420 static void dec_fpu(DisasContext *dc)
1421 {
1422 unsigned int fpu_insn;
1423
1424 if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
1425 return;
1426 }
1427
1428 fpu_insn = (dc->ir >> 7) & 7;
1429
1430 switch (fpu_insn) {
1431 case 0:
1432 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1433 cpu_R[dc->rb]);
1434 break;
1435
1436 case 1:
1437 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1438 cpu_R[dc->rb]);
1439 break;
1440
1441 case 2:
1442 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1443 cpu_R[dc->rb]);
1444 break;
1445
1446 case 3:
1447 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1448 cpu_R[dc->rb]);
1449 break;
1450
1451 case 4:
1452 switch ((dc->ir >> 4) & 7) {
1453 case 0:
1454 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1455 cpu_R[dc->ra], cpu_R[dc->rb]);
1456 break;
1457 case 1:
1458 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1459 cpu_R[dc->ra], cpu_R[dc->rb]);
1460 break;
1461 case 2:
1462 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1463 cpu_R[dc->ra], cpu_R[dc->rb]);
1464 break;
1465 case 3:
1466 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1467 cpu_R[dc->ra], cpu_R[dc->rb]);
1468 break;
1469 case 4:
1470 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1471 cpu_R[dc->ra], cpu_R[dc->rb]);
1472 break;
1473 case 5:
1474 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1475 cpu_R[dc->ra], cpu_R[dc->rb]);
1476 break;
1477 case 6:
1478 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1479 cpu_R[dc->ra], cpu_R[dc->rb]);
1480 break;
1481 default:
1482 qemu_log_mask(LOG_UNIMP,
1483 "unimplemented fcmp fpu_insn=%x pc=%x"
1484 " opc=%x\n",
1485 fpu_insn, dc->pc, dc->opcode);
1486 dc->abort_at_next_insn = 1;
1487 break;
1488 }
1489 break;
1490
1491 case 5:
1492 if (!dec_check_fpuv2(dc)) {
1493 return;
1494 }
1495 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1496 break;
1497
1498 case 6:
1499 if (!dec_check_fpuv2(dc)) {
1500 return;
1501 }
1502 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1503 break;
1504
1505 case 7:
1506 if (!dec_check_fpuv2(dc)) {
1507 return;
1508 }
1509 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1510 break;
1511
1512 default:
1513 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1514 " opc=%x\n",
1515 fpu_insn, dc->pc, dc->opcode);
1516 dc->abort_at_next_insn = 1;
1517 break;
1518 }
1519 }
1520
1521 static void dec_null(DisasContext *dc)
1522 {
1523 if (trap_illegal(dc, true)) {
1524 return;
1525 }
1526 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1527 dc->abort_at_next_insn = 1;
1528 }
1529
1530 /* Insns connected to FSL or AXI stream attached devices. */
1531 static void dec_stream(DisasContext *dc)
1532 {
1533 TCGv_i32 t_id, t_ctrl;
1534 int ctrl;
1535
1536 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1537 dc->type_b ? "" : "d", dc->imm);
1538
1539 if (trap_userspace(dc, true)) {
1540 return;
1541 }
1542
1543 t_id = tcg_temp_new_i32();
1544 if (dc->type_b) {
1545 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
1546 ctrl = dc->imm >> 10;
1547 } else {
1548 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
1549 ctrl = dc->imm >> 5;
1550 }
1551
1552 t_ctrl = tcg_const_i32(ctrl);
1553
1554 if (dc->rd == 0) {
1555 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1556 } else {
1557 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1558 }
1559 tcg_temp_free_i32(t_id);
1560 tcg_temp_free_i32(t_ctrl);
1561 }
1562
1563 static struct decoder_info {
1564 struct {
1565 uint32_t bits;
1566 uint32_t mask;
1567 };
1568 void (*dec)(DisasContext *dc);
1569 } decinfo[] = {
1570 {DEC_ADD, dec_add},
1571 {DEC_SUB, dec_sub},
1572 {DEC_AND, dec_and},
1573 {DEC_XOR, dec_xor},
1574 {DEC_OR, dec_or},
1575 {DEC_BIT, dec_bit},
1576 {DEC_BARREL, dec_barrel},
1577 {DEC_LD, dec_load},
1578 {DEC_ST, dec_store},
1579 {DEC_IMM, dec_imm},
1580 {DEC_BR, dec_br},
1581 {DEC_BCC, dec_bcc},
1582 {DEC_RTS, dec_rts},
1583 {DEC_FPU, dec_fpu},
1584 {DEC_MUL, dec_mul},
1585 {DEC_DIV, dec_div},
1586 {DEC_MSR, dec_msr},
1587 {DEC_STREAM, dec_stream},
1588 {{0, 0}, dec_null}
1589 };
1590
1591 static inline void decode(DisasContext *dc, uint32_t ir)
1592 {
1593 int i;
1594
1595 dc->ir = ir;
1596 LOG_DIS("%8.8x\t", dc->ir);
1597
1598 if (ir == 0) {
1599 trap_illegal(dc, dc->cpu->cfg.opcode_0_illegal);
1600 /* Don't decode nop/zero instructions any further. */
1601 return;
1602 }
1603
1604 /* Bit 2 (MSB-first numbering; bit 29 here) selects the type B (immediate) form. */
1605 dc->type_b = ir & (1 << 29);
1606
1607 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1608 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1609 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1610 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1611 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1612
1613 /* Dispatch to the per-opcode decoder. */
1614 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1615 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1616 decinfo[i].dec(dc);
1617 break;
1618 }
1619 }
1620 }
1621
1622 /* generate intermediate code for basic block 'tb'. */
1623 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1624 {
1625 CPUMBState *env = cs->env_ptr;
1626 MicroBlazeCPU *cpu = env_archcpu(env);
1627 uint32_t pc_start;
1628 struct DisasContext ctx;
1629 struct DisasContext *dc = &ctx;
1630 uint32_t page_start, org_flags;
1631 uint32_t npc;
1632 int num_insns;
1633
1634 pc_start = tb->pc;
1635 dc->cpu = cpu;
1636 dc->tb = tb;
1637 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1638
1639 dc->is_jmp = DISAS_NEXT;
1640 dc->jmp = 0;
1641 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1642 if (dc->delayed_branch) {
1643 dc->jmp = JMP_INDIRECT;
1644 }
1645 dc->pc = pc_start;
1646 dc->singlestep_enabled = cs->singlestep_enabled;
1647 dc->cpustate_changed = 0;
1648 dc->abort_at_next_insn = 0;
1649
1650 if (pc_start & 3) {
1651 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1652 }
1653
1654 page_start = pc_start & TARGET_PAGE_MASK;
1655 num_insns = 0;
1656
1657 gen_tb_start(tb);
1658 do
1659 {
1660 tcg_gen_insn_start(dc->pc);
1661 num_insns++;
1662
1663 #if SIM_COMPAT
1664 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1665 tcg_gen_movi_i32(cpu_pc, dc->pc);
1666 gen_helper_debug();
1667 }
1668 #endif
1669
1670 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1671 t_gen_raise_exception(dc, EXCP_DEBUG);
1672 dc->is_jmp = DISAS_UPDATE;
1673 /* The address covered by the breakpoint must be included in
1674 [tb->pc, tb->pc + tb->size) in order for it to be
1675 properly cleared -- thus we increment the PC here so that
1676 the logic setting tb->size below does the right thing. */
1677 dc->pc += 4;
1678 break;
1679 }
1680
1681 /* Pretty disas. */
1682 LOG_DIS("%8.8x:\t", dc->pc);
1683
1684 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
1685 gen_io_start();
1686 }
1687
1688 dc->clear_imm = 1;
1689 decode(dc, cpu_ldl_code(env, dc->pc));
1690 if (dc->clear_imm)
1691 dc->tb_flags &= ~IMM_FLAG;
1692 dc->pc += 4;
1693
1694 if (dc->delayed_branch) {
1695 dc->delayed_branch--;
1696 if (!dc->delayed_branch) {
1697 if (dc->tb_flags & DRTI_FLAG)
1698 do_rti(dc);
1699 if (dc->tb_flags & DRTB_FLAG)
1700 do_rtb(dc);
1701 if (dc->tb_flags & DRTE_FLAG)
1702 do_rte(dc);
1703 /* Clear the delay slot flag. */
1704 dc->tb_flags &= ~D_FLAG;
1705 /* If it is a direct jump, try direct chaining. */
1706 if (dc->jmp == JMP_INDIRECT) {
1707 TCGv_i32 tmp_pc = tcg_const_i32(dc->pc);
1708 eval_cond_jmp(dc, cpu_btarget, tmp_pc);
1709 tcg_temp_free_i32(tmp_pc);
1710 dc->is_jmp = DISAS_JUMP;
1711 } else if (dc->jmp == JMP_DIRECT) {
1712 t_sync_flags(dc);
1713 gen_goto_tb(dc, 0, dc->jmp_pc);
1714 dc->is_jmp = DISAS_TB_JUMP;
1715 } else if (dc->jmp == JMP_DIRECT_CC) {
1716 TCGLabel *l1 = gen_new_label();
1717 t_sync_flags(dc);
1718 /* Conditional jmp. */
1719 tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
1720 gen_goto_tb(dc, 1, dc->pc);
1721 gen_set_label(l1);
1722 gen_goto_tb(dc, 0, dc->jmp_pc);
1723
1724 dc->is_jmp = DISAS_TB_JUMP;
1725 }
1726 break;
1727 }
1728 }
1729 if (cs->singlestep_enabled) {
1730 break;
1731 }
1732 } while (!dc->is_jmp && !dc->cpustate_changed
1733 && !tcg_op_buf_full()
1734 && !singlestep
1735 && (dc->pc - page_start < TARGET_PAGE_SIZE)
1736 && num_insns < max_insns);
1737
1738 npc = dc->pc;
1739 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1740 if (dc->tb_flags & D_FLAG) {
1741 dc->is_jmp = DISAS_UPDATE;
1742 tcg_gen_movi_i32(cpu_pc, npc);
1743 sync_jmpstate(dc);
1744 } else
1745 npc = dc->jmp_pc;
1746 }
1747
1748 /* Force an update if the per-tb cpu state has changed. */
1749 if (dc->is_jmp == DISAS_NEXT
1750 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1751 dc->is_jmp = DISAS_UPDATE;
1752 tcg_gen_movi_i32(cpu_pc, npc);
1753 }
1754 t_sync_flags(dc);
1755
1756 if (unlikely(cs->singlestep_enabled)) {
1757 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1758
1759 if (dc->is_jmp != DISAS_JUMP) {
1760 tcg_gen_movi_i32(cpu_pc, npc);
1761 }
1762 gen_helper_raise_exception(cpu_env, tmp);
1763 tcg_temp_free_i32(tmp);
1764 } else {
1765 switch(dc->is_jmp) {
1766 case DISAS_NEXT:
1767 gen_goto_tb(dc, 1, npc);
1768 break;
1769 default:
1770 case DISAS_JUMP:
1771 case DISAS_UPDATE:
1772 /* indicate that the hash table must be used
1773 to find the next TB */
1774 tcg_gen_exit_tb(NULL, 0);
1775 break;
1776 case DISAS_TB_JUMP:
1777 /* nothing more to generate */
1778 break;
1779 }
1780 }
1781 gen_tb_end(tb, num_insns);
1782
1783 tb->size = dc->pc - pc_start;
1784 tb->icount = num_insns;
1785
1786 #ifdef DEBUG_DISAS
1787 #if !SIM_COMPAT
1788 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1789 && qemu_log_in_addr_range(pc_start)) {
1790 FILE *logfile = qemu_log_lock();
1791 qemu_log("--------------\n");
1792 log_target_disas(cs, pc_start, dc->pc - pc_start);
1793 qemu_log_unlock(logfile);
1794 }
1795 #endif
1796 #endif
1797 assert(!dc->abort_at_next_insn);
1798 }
1799
1800 void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1801 {
1802 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1803 CPUMBState *env = &cpu->env;
1804 int i;
1805
1806 if (!env) {
1807 return;
1808 }
1809
1810 qemu_fprintf(f, "IN: PC=%x %s\n",
1811 env->pc, lookup_symbol(env->pc));
1812 qemu_fprintf(f, "rmsr=%x resr=%x rear=%" PRIx64 " "
1813 "debug=%x imm=%x iflags=%x fsr=%x rbtr=%x\n",
1814 env->msr, env->esr, env->ear,
1815 env->debug, env->imm, env->iflags, env->fsr,
1816 env->btr);
1817 qemu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1818 env->btaken, env->btarget,
1819 (env->msr & MSR_UM) ? "user" : "kernel",
1820 (env->msr & MSR_UMS) ? "user" : "kernel",
1821 (bool)(env->msr & MSR_EIP),
1822 (bool)(env->msr & MSR_IE));
1823 for (i = 0; i < 12; i++) {
1824 qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
1825 if ((i + 1) % 4 == 0) {
1826 qemu_fprintf(f, "\n");
1827 }
1828 }
1829
1830 /* Registers that aren't modeled are reported as 0 */
1831 qemu_fprintf(f, "redr=%x rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
1832 "rtlblo=0 rtlbhi=0\n", env->edr);
1833 qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
1834 for (i = 0; i < 32; i++) {
1835 qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1836 if ((i + 1) % 4 == 0)
1837 qemu_fprintf(f, "\n");
1838 }
1839 qemu_fprintf(f, "\n\n");
1840 }
1841
1842 void mb_tcg_init(void)
1843 {
1844 int i;
1845
1846 env_debug = tcg_global_mem_new_i32(cpu_env,
1847 offsetof(CPUMBState, debug),
1848 "debug0");
1849 env_iflags = tcg_global_mem_new_i32(cpu_env,
1850 offsetof(CPUMBState, iflags),
1851 "iflags");
1852 env_imm = tcg_global_mem_new_i32(cpu_env,
1853 offsetof(CPUMBState, imm),
1854 "imm");
1855 cpu_btarget = tcg_global_mem_new_i32(cpu_env,
1856 offsetof(CPUMBState, btarget),
1857 "btarget");
1858 env_btaken = tcg_global_mem_new_i32(cpu_env,
1859 offsetof(CPUMBState, btaken),
1860 "btaken");
1861 env_res_addr = tcg_global_mem_new(cpu_env,
1862 offsetof(CPUMBState, res_addr),
1863 "res_addr");
1864 env_res_val = tcg_global_mem_new_i32(cpu_env,
1865 offsetof(CPUMBState, res_val),
1866 "res_val");
1867 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1868 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
1869 offsetof(CPUMBState, regs[i]),
1870 regnames[i]);
1871 }
1872
1873 cpu_pc =
1874 tcg_global_mem_new_i32(cpu_env, offsetof(CPUMBState, pc), "rpc");
1875 cpu_msr =
1876 tcg_global_mem_new_i32(cpu_env, offsetof(CPUMBState, msr), "rmsr");
1877 cpu_esr =
1878 tcg_global_mem_new_i32(cpu_env, offsetof(CPUMBState, esr), "resr");
1879 }
1880
1881 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1882 target_ulong *data)
1883 {
1884 env->pc = data[0];
1885 }