target/microblaze/translate.c (at commit: target/microblaze: Convert dec_mul to decodetree)
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "qemu/qemu-print.h"
32
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35
36 #define EXTRACT_FIELD(src, start, end) \
37 (((src) >> start) & ((1 << (end - start + 1)) - 1))
38
39 /* is_jmp field values */
40 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
41 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
42
43 static TCGv_i32 cpu_R[32];
44 static TCGv_i32 cpu_pc;
45 static TCGv_i32 cpu_msr;
46 static TCGv_i32 cpu_msr_c;
47 static TCGv_i32 cpu_imm;
48 static TCGv_i32 cpu_btaken;
49 static TCGv_i32 cpu_btarget;
50 static TCGv_i32 cpu_iflags;
51 static TCGv cpu_res_addr;
52 static TCGv_i32 cpu_res_val;
53
54 #include "exec/gen-icount.h"
55
56 /* This is the state at translation time. */
57 typedef struct DisasContext {
58 DisasContextBase base;
59 MicroBlazeCPU *cpu;
60
61 TCGv_i32 r0;
62 bool r0_set;
63
64 /* Decoder. */
65 int type_b;
66 uint32_t ir;
67 uint32_t ext_imm;
68 uint8_t opcode;
69 uint8_t rd, ra, rb;
70 uint16_t imm;
71
72 unsigned int cpustate_changed;
73 unsigned int delayed_branch;
74 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
75 unsigned int clear_imm;
76
77 #define JMP_NOJMP 0
78 #define JMP_DIRECT 1
79 #define JMP_DIRECT_CC 2
80 #define JMP_INDIRECT 3
81 unsigned int jmp;
82 uint32_t jmp_pc;
83
84 int abort_at_next_insn;
85 } DisasContext;
86
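/*
 * If the previous insn was IMM, dc->ext_imm already holds its 16-bit
 * payload in the upper half; merge the current insn's 16-bit immediate
 * into the low half to form the full 32-bit value.
 */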
87 static int typeb_imm(DisasContext *dc, int x)
88 {
89 if (dc->tb_flags & IMM_FLAG) {
90 return deposit32(dc->ext_imm, 0, 16, x);
91 }
92 return x;
93 }
94
95 /* Include the auto-generated decoder. */
96 #include "decode-insns.c.inc"
97
98 static inline void t_sync_flags(DisasContext *dc)
99 {
100 /* Sync the tb-dependent flags between translator and runtime. */
101 if (dc->tb_flags != dc->synced_flags) {
102 tcg_gen_movi_i32(cpu_iflags, dc->tb_flags);
103 dc->synced_flags = dc->tb_flags;
104 }
105 }
106
107 static void gen_raise_exception(DisasContext *dc, uint32_t index)
108 {
109 TCGv_i32 tmp = tcg_const_i32(index);
110
111 gen_helper_raise_exception(cpu_env, tmp);
112 tcg_temp_free_i32(tmp);
113 dc->base.is_jmp = DISAS_NORETURN;
114 }
115
116 static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
117 {
118 t_sync_flags(dc);
119 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
120 gen_raise_exception(dc, index);
121 }
122
123 static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
124 {
125 TCGv_i32 tmp = tcg_const_i32(esr_ec);
126 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
127 tcg_temp_free_i32(tmp);
128
129 gen_raise_exception_sync(dc, EXCP_HW_EXCP);
130 }
131
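/*
 * Direct TB chaining is only used when the destination lies on the same
 * guest page as the start of this TB; otherwise exit and look the target
 * up dynamically.
 */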
132 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
133 {
134 #ifndef CONFIG_USER_ONLY
135 return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
136 #else
137 return true;
138 #endif
139 }
140
141 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
142 {
143 if (dc->base.singlestep_enabled) {
144 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
145 tcg_gen_movi_i32(cpu_pc, dest);
146 gen_helper_raise_exception(cpu_env, tmp);
147 tcg_temp_free_i32(tmp);
148 } else if (use_goto_tb(dc, dest)) {
149 tcg_gen_goto_tb(n);
150 tcg_gen_movi_i32(cpu_pc, dest);
151 tcg_gen_exit_tb(dc->base.tb, n);
152 } else {
153 tcg_gen_movi_i32(cpu_pc, dest);
154 tcg_gen_exit_tb(NULL, 0);
155 }
156 dc->base.is_jmp = DISAS_NORETURN;
157 }
158
159 /*
160 * Returns true if the insn is an illegal operation.
161 * If exceptions are enabled, an exception is raised.
162 */
163 static bool trap_illegal(DisasContext *dc, bool cond)
164 {
165 if (cond && (dc->tb_flags & MSR_EE_FLAG)
166 && dc->cpu->cfg.illegal_opcode_exception) {
167 gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
168 }
169 return cond;
170 }
171
172 /*
173 * Returns true if the insn is illegal in userspace.
174 * If exceptions are enabled, an exception is raised.
175 */
176 static bool trap_userspace(DisasContext *dc, bool cond)
177 {
178 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
179 bool cond_user = cond && mem_index == MMU_USER_IDX;
180
181 if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
182 gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
183 }
184 return cond_user;
185 }
186
187 static int32_t dec_alu_typeb_imm(DisasContext *dc)
188 {
189 tcg_debug_assert(dc->type_b);
190 return typeb_imm(dc, (int16_t)dc->imm);
191 }
192
193 static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
194 {
195 if (dc->type_b) {
196 tcg_gen_movi_i32(cpu_imm, dec_alu_typeb_imm(dc));
197 return &cpu_imm;
198 }
199 return &cpu_R[dc->rb];
200 }
201
202 static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
203 {
204 if (likely(reg != 0)) {
205 return cpu_R[reg];
206 }
207 if (!dc->r0_set) {
208 if (dc->r0 == NULL) {
209 dc->r0 = tcg_temp_new_i32();
210 }
211 tcg_gen_movi_i32(dc->r0, 0);
212 dc->r0_set = true;
213 }
214 return dc->r0;
215 }
216
217 static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
218 {
219 if (likely(reg != 0)) {
220 return cpu_R[reg];
221 }
222 if (dc->r0 == NULL) {
223 dc->r0 = tcg_temp_new_i32();
224 }
225 return dc->r0;
226 }
227
228 static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
229 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
230 {
231 TCGv_i32 rd, ra, rb;
232
233 if (arg->rd == 0 && !side_effects) {
234 return true;
235 }
236
237 rd = reg_for_write(dc, arg->rd);
238 ra = reg_for_read(dc, arg->ra);
239 rb = reg_for_read(dc, arg->rb);
240 fn(rd, ra, rb);
241 return true;
242 }
243
244 static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
245 void (*fni)(TCGv_i32, TCGv_i32, int32_t))
246 {
247 TCGv_i32 rd, ra;
248
249 if (arg->rd == 0 && !side_effects) {
250 return true;
251 }
252
253 rd = reg_for_write(dc, arg->rd);
254 ra = reg_for_read(dc, arg->ra);
255 fni(rd, ra, arg->imm);
256 return true;
257 }
258
259 static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
260 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
261 {
262 TCGv_i32 rd, ra, imm;
263
264 if (arg->rd == 0 && !side_effects) {
265 return true;
266 }
267
268 rd = reg_for_write(dc, arg->rd);
269 ra = reg_for_read(dc, arg->ra);
270 imm = tcg_const_i32(arg->imm);
271
272 fn(rd, ra, imm);
273
274 tcg_temp_free_i32(imm);
275 return true;
276 }
277
278 #define DO_TYPEA(NAME, SE, FN) \
279 static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
280 { return do_typea(dc, a, SE, FN); }
281
282 #define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
283 static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
284 { return dc->cpu->cfg.CFG && do_typea(dc, a, SE, FN); }
285
286 #define DO_TYPEBI(NAME, SE, FNI) \
287 static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
288 { return do_typeb_imm(dc, a, SE, FNI); }
289
290 #define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
291 static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
292 { return dc->cpu->cfg.CFG && do_typeb_imm(dc, a, SE, FNI); }
293
294 #define DO_TYPEBV(NAME, SE, FN) \
295 static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
296 { return do_typeb_val(dc, a, SE, FN); }
297
298 /* No input carry, but output carry. */
299 static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
300 {
301 TCGv_i32 zero = tcg_const_i32(0);
302
303 tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
304
305 tcg_temp_free_i32(zero);
306 }
307
308 /* Input and output carry. */
309 static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
310 {
311 TCGv_i32 zero = tcg_const_i32(0);
312 TCGv_i32 tmp = tcg_temp_new_i32();
313
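    /*
     * Two widening adds compute ina + inb + carry-in, leaving the
     * carry-out of the full sum in cpu_msr_c.
     */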
314 tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
315 tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
316
317 tcg_temp_free_i32(tmp);
318 tcg_temp_free_i32(zero);
319 }
320
321 /* Input carry, but no output carry. */
322 static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
323 {
324 tcg_gen_add_i32(out, ina, inb);
325 tcg_gen_add_i32(out, out, cpu_msr_c);
326 }
327
328 DO_TYPEA(add, true, gen_add)
329 DO_TYPEA(addc, true, gen_addc)
330 DO_TYPEA(addk, false, tcg_gen_add_i32)
331 DO_TYPEA(addkc, true, gen_addkc)
332
333 DO_TYPEBV(addi, true, gen_add)
334 DO_TYPEBV(addic, true, gen_addc)
335 DO_TYPEBI(addik, false, tcg_gen_addi_i32)
336 DO_TYPEBV(addikc, true, gen_addkc)
337
338 static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
339 {
340 tcg_gen_andi_i32(out, ina, ~imm);
341 }
342
343 DO_TYPEA(and, false, tcg_gen_and_i32)
344 DO_TYPEBI(andi, false, tcg_gen_andi_i32)
345 DO_TYPEA(andn, false, tcg_gen_andc_i32)
346 DO_TYPEBI(andni, false, gen_andni)
347
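/*
 * cmp/cmpu: rd = rb - ra, with bit 31 replaced by the result of the
 * comparison ra > rb, signed for cmp and unsigned for cmpu.
 */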
348 static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
349 {
350 TCGv_i32 lt = tcg_temp_new_i32();
351
352 tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
353 tcg_gen_sub_i32(out, inb, ina);
354 tcg_gen_deposit_i32(out, out, lt, 31, 1);
355 tcg_temp_free_i32(lt);
356 }
357
358 static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
359 {
360 TCGv_i32 lt = tcg_temp_new_i32();
361
362 tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
363 tcg_gen_sub_i32(out, inb, ina);
364 tcg_gen_deposit_i32(out, out, lt, 31, 1);
365 tcg_temp_free_i32(lt);
366 }
367
368 DO_TYPEA(cmp, false, gen_cmp)
369 DO_TYPEA(cmpu, false, gen_cmpu)
370
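/* The mulh/mulhu/mulhsu variants keep only the high 32 bits of the 64-bit product. */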
371 static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
372 {
373 TCGv_i32 tmp = tcg_temp_new_i32();
374 tcg_gen_muls2_i32(tmp, out, ina, inb);
375 tcg_temp_free_i32(tmp);
376 }
377
378 static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
379 {
380 TCGv_i32 tmp = tcg_temp_new_i32();
381 tcg_gen_mulu2_i32(tmp, out, ina, inb);
382 tcg_temp_free_i32(tmp);
383 }
384
385 static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
386 {
387 TCGv_i32 tmp = tcg_temp_new_i32();
388 tcg_gen_mulsu2_i32(tmp, out, ina, inb);
389 tcg_temp_free_i32(tmp);
390 }
391
392 DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
393 DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
394 DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
395 DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
396 DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
397
398 DO_TYPEA(or, false, tcg_gen_or_i32)
399 DO_TYPEBI(ori, false, tcg_gen_ori_i32)
400
401 static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
402 {
403 tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
404 }
405
406 static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
407 {
408 tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
409 }
410
411 DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
412 DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
413 DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
414
415 /* No input carry, but output carry. */
416 static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
417 {
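    /* The carry out of rb + ~ra + 1 is set exactly when rb >= ra (unsigned). */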
418 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
419 tcg_gen_sub_i32(out, inb, ina);
420 }
421
422 /* Input and output carry. */
423 static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
424 {
425 TCGv_i32 zero = tcg_const_i32(0);
426 TCGv_i32 tmp = tcg_temp_new_i32();
427
428 tcg_gen_not_i32(tmp, ina);
429 tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
430 tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
431
432 tcg_temp_free_i32(zero);
433 tcg_temp_free_i32(tmp);
434 }
435
436 /* No input or output carry. */
437 static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
438 {
439 tcg_gen_sub_i32(out, inb, ina);
440 }
441
442 /* Input carry, no output carry. */
443 static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
444 {
445 TCGv_i32 nota = tcg_temp_new_i32();
446
447 tcg_gen_not_i32(nota, ina);
448 tcg_gen_add_i32(out, inb, nota);
449 tcg_gen_add_i32(out, out, cpu_msr_c);
450
451 tcg_temp_free_i32(nota);
452 }
453
454 DO_TYPEA(rsub, true, gen_rsub)
455 DO_TYPEA(rsubc, true, gen_rsubc)
456 DO_TYPEA(rsubk, false, gen_rsubk)
457 DO_TYPEA(rsubkc, true, gen_rsubkc)
458
459 DO_TYPEBV(rsubi, true, gen_rsub)
460 DO_TYPEBV(rsubic, true, gen_rsubc)
461 DO_TYPEBV(rsubik, false, gen_rsubk)
462 DO_TYPEBV(rsubikc, true, gen_rsubkc)
463
464 DO_TYPEA(xor, false, tcg_gen_xor_i32)
465 DO_TYPEBI(xori, false, tcg_gen_xori_i32)
466
467 static bool trans_zero(DisasContext *dc, arg_zero *arg)
468 {
469 /* If opcode_0_illegal, trap. */
470 if (dc->cpu->cfg.opcode_0_illegal) {
471 trap_illegal(dc, true);
472 return true;
473 }
474 /*
475 * Otherwise, this is "add r0, r0, r0".
476 * Continue to trans_add so that MSR[C] gets cleared.
477 */
478 return false;
479 }
480
481 static void msr_read(DisasContext *dc, TCGv_i32 d)
482 {
483 TCGv_i32 t;
484
485 /* Replicate the cpu_msr_c boolean into both the MSR_C bit and its MSR_CC copy. */
486 t = tcg_temp_new_i32();
487 tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
488 tcg_gen_or_i32(d, cpu_msr, t);
489 tcg_temp_free_i32(t);
490 }
491
492 static void msr_write(DisasContext *dc, TCGv_i32 v)
493 {
494 dc->cpustate_changed = 1;
495
496 /* Install MSR_C. */
497 tcg_gen_extract_i32(cpu_msr_c, v, 2, 1);
498
499 /* Clear MSR_C and MSR_CC; MSR_PVR is not writable, and is always clear. */
500 tcg_gen_andi_i32(cpu_msr, v, ~(MSR_C | MSR_CC | MSR_PVR));
501 }
502
503 static void dec_msr(DisasContext *dc)
504 {
505 CPUState *cs = CPU(dc->cpu);
506 TCGv_i32 t0, t1;
507 unsigned int sr, rn;
508 bool to, clrset, extended = false;
509
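    /*
     * imm[13:0] holds the special register number, imm bit 14 is set for
     * mts (write) and clear for mfs (read), and imm bit 15 clear selects
     * the msrclr/msrset forms.
     */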
510 sr = extract32(dc->imm, 0, 14);
511 to = extract32(dc->imm, 14, 1);
512 clrset = extract32(dc->imm, 15, 1) == 0;
513 dc->type_b = 1;
514 if (to) {
515 dc->cpustate_changed = 1;
516 }
517
518 /* Extended MSRs are only available if addr_size > 32. */
519 if (dc->cpu->cfg.addr_size > 32) {
520 /* The E-bit is encoded differently for To/From MSR. */
521 static const unsigned int e_bit[] = { 19, 24 };
522
523 extended = extract32(dc->imm, e_bit[to], 1);
524 }
525
526 /* msrclr and msrset. */
527 if (clrset) {
528 bool clr = extract32(dc->ir, 16, 1);
529
530 if (!dc->cpu->cfg.use_msr_instr) {
531 /* nop??? */
532 return;
533 }
534
535 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
536 return;
537 }
538
539 if (dc->rd)
540 msr_read(dc, cpu_R[dc->rd]);
541
542 t0 = tcg_temp_new_i32();
543 t1 = tcg_temp_new_i32();
544 msr_read(dc, t0);
545 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
546
547 if (clr) {
548 tcg_gen_not_i32(t1, t1);
549 tcg_gen_and_i32(t0, t0, t1);
550 } else
551 tcg_gen_or_i32(t0, t0, t1);
552 msr_write(dc, t0);
553 tcg_temp_free_i32(t0);
554 tcg_temp_free_i32(t1);
555 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
556 dc->base.is_jmp = DISAS_UPDATE;
557 return;
558 }
559
560 if (trap_userspace(dc, to)) {
561 return;
562 }
563
564 #if !defined(CONFIG_USER_ONLY)
565 /* Catch read/writes to the mmu block. */
566 if ((sr & ~0xff) == 0x1000) {
567 TCGv_i32 tmp_ext = tcg_const_i32(extended);
568 TCGv_i32 tmp_sr;
569
570 sr &= 7;
571 tmp_sr = tcg_const_i32(sr);
572 if (to) {
573 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
574 } else {
575 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
576 }
577 tcg_temp_free_i32(tmp_sr);
578 tcg_temp_free_i32(tmp_ext);
579 return;
580 }
581 #endif
582
583 if (to) {
584 switch (sr) {
585 case SR_PC:
586 break;
587 case SR_MSR:
588 msr_write(dc, cpu_R[dc->ra]);
589 break;
590 case SR_EAR:
591 {
592 TCGv_i64 t64 = tcg_temp_new_i64();
593 tcg_gen_extu_i32_i64(t64, cpu_R[dc->ra]);
594 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUMBState, ear));
595 tcg_temp_free_i64(t64);
596 }
597 break;
598 case SR_ESR:
599 tcg_gen_st_i32(cpu_R[dc->ra],
600 cpu_env, offsetof(CPUMBState, esr));
601 break;
602 case SR_FSR:
603 tcg_gen_st_i32(cpu_R[dc->ra],
604 cpu_env, offsetof(CPUMBState, fsr));
605 break;
606 case SR_BTR:
607 tcg_gen_st_i32(cpu_R[dc->ra],
608 cpu_env, offsetof(CPUMBState, btr));
609 break;
610 case SR_EDR:
611 tcg_gen_st_i32(cpu_R[dc->ra],
612 cpu_env, offsetof(CPUMBState, edr));
613 break;
614 case 0x800:
615 tcg_gen_st_i32(cpu_R[dc->ra],
616 cpu_env, offsetof(CPUMBState, slr));
617 break;
618 case 0x802:
619 tcg_gen_st_i32(cpu_R[dc->ra],
620 cpu_env, offsetof(CPUMBState, shr));
621 break;
622 default:
623 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
624 break;
625 }
626 } else {
627 switch (sr) {
628 case SR_PC:
629 tcg_gen_movi_i32(cpu_R[dc->rd], dc->base.pc_next);
630 break;
631 case SR_MSR:
632 msr_read(dc, cpu_R[dc->rd]);
633 break;
634 case SR_EAR:
635 {
636 TCGv_i64 t64 = tcg_temp_new_i64();
637 tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
638 if (extended) {
639 tcg_gen_extrh_i64_i32(cpu_R[dc->rd], t64);
640 } else {
641 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], t64);
642 }
643 tcg_temp_free_i64(t64);
644 }
645 break;
646 case SR_ESR:
647 tcg_gen_ld_i32(cpu_R[dc->rd],
648 cpu_env, offsetof(CPUMBState, esr));
649 break;
650 case SR_FSR:
651 tcg_gen_ld_i32(cpu_R[dc->rd],
652 cpu_env, offsetof(CPUMBState, fsr));
653 break;
654 case SR_BTR:
655 tcg_gen_ld_i32(cpu_R[dc->rd],
656 cpu_env, offsetof(CPUMBState, btr));
657 break;
658 case SR_EDR:
659 tcg_gen_ld_i32(cpu_R[dc->rd],
660 cpu_env, offsetof(CPUMBState, edr));
661 break;
662 case 0x800:
663 tcg_gen_ld_i32(cpu_R[dc->rd],
664 cpu_env, offsetof(CPUMBState, slr));
665 break;
666 case 0x802:
667 tcg_gen_ld_i32(cpu_R[dc->rd],
668 cpu_env, offsetof(CPUMBState, shr));
669 break;
670 case 0x2000 ... 0x200c:
671 rn = sr & 0xf;
672 tcg_gen_ld_i32(cpu_R[dc->rd],
673 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
674 break;
675 default:
676 cpu_abort(cs, "unknown mfs reg %x\n", sr);
677 break;
678 }
679 }
680
681 if (dc->rd == 0) {
682 tcg_gen_movi_i32(cpu_R[0], 0);
683 }
684 }
685
686 /* Div unit. */
687 static void dec_div(DisasContext *dc)
688 {
689 unsigned int u;
690
691 u = dc->imm & 2;
692
693 if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
694 return;
695 }
696
697 if (u)
698 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
699 cpu_R[dc->ra]);
700 else
701 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
702 cpu_R[dc->ra]);
703 if (!dc->rd)
704 tcg_gen_movi_i32(cpu_R[dc->rd], 0);
705 }
706
707 static void dec_barrel(DisasContext *dc)
708 {
709 TCGv_i32 t0;
710 unsigned int imm_w, imm_s;
711 bool s, t, e = false, i = false;
712
713 if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
714 return;
715 }
716
717 if (dc->type_b) {
718 /* Insert and extract are only available in immediate mode. */
719 i = extract32(dc->imm, 15, 1);
720 e = extract32(dc->imm, 14, 1);
721 }
722 s = extract32(dc->imm, 10, 1);
723 t = extract32(dc->imm, 9, 1);
724 imm_w = extract32(dc->imm, 6, 5);
725 imm_s = extract32(dc->imm, 0, 5);
726
727 if (e) {
728 if (imm_w + imm_s > 32 || imm_w == 0) {
729 /* These inputs have undefined behavior. */
730 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
731 imm_w, imm_s);
732 } else {
733 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
734 }
735 } else if (i) {
736 int width = imm_w - imm_s + 1;
737
738 if (imm_w < imm_s) {
739 /* These inputs have undefined behavior. */
740 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
741 imm_w, imm_s);
742 } else {
743 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
744 imm_s, width);
745 }
746 } else {
747 t0 = tcg_temp_new_i32();
748
749 tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
750 tcg_gen_andi_i32(t0, t0, 31);
751
752 if (s) {
753 tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
754 } else {
755 if (t) {
756 tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
757 } else {
758 tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
759 }
760 }
761 tcg_temp_free_i32(t0);
762 }
763 }
764
765 static void dec_bit(DisasContext *dc)
766 {
767 CPUState *cs = CPU(dc->cpu);
768 TCGv_i32 t0;
769 unsigned int op;
770
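    /*
     * The low 9 bits of the insn select the sub-op: src, srl/sra,
     * sext8/sext16, wdc, wic, clz, swapb and swaph.
     */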
771 op = dc->ir & ((1 << 9) - 1);
772 switch (op) {
773 case 0x21:
774 /* src. */
775 t0 = tcg_temp_new_i32();
776
777 tcg_gen_shli_i32(t0, cpu_msr_c, 31);
778 tcg_gen_andi_i32(cpu_msr_c, cpu_R[dc->ra], 1);
779 if (dc->rd) {
780 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
781 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
782 }
783 tcg_temp_free_i32(t0);
784 break;
785
786 case 0x1:
787 case 0x41:
788 /* srl. */
789 tcg_gen_andi_i32(cpu_msr_c, cpu_R[dc->ra], 1);
790 if (dc->rd) {
791 if (op == 0x41)
792 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
793 else
794 tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
795 }
796 break;
797 case 0x60:
798 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
799 break;
800 case 0x61:
801 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
802 break;
803 case 0x64:
804 case 0x66:
805 case 0x74:
806 case 0x76:
807 /* wdc. */
808 trap_userspace(dc, true);
809 break;
810 case 0x68:
811 /* wic. */
812 trap_userspace(dc, true);
813 break;
814 case 0xe0:
815 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
816 return;
817 }
818 if (dc->cpu->cfg.use_pcmp_instr) {
819 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
820 }
821 break;
822 case 0x1e0:
823 /* swapb */
824 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
825 break;
826 case 0x1e2:
827 /* swaph */
828 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
829 break;
830 default:
831 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
832 (uint32_t)dc->base.pc_next, op, dc->rd, dc->ra, dc->rb);
833 break;
834 }
835 }
836
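/*
 * Materialize a direct branch's taken flag and target into the cpu_btaken
 * and cpu_btarget globals, downgrading it to JMP_INDIRECT, so that a fault
 * in the delay slot observes consistent branch state.
 */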
837 static inline void sync_jmpstate(DisasContext *dc)
838 {
839 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
840 if (dc->jmp == JMP_DIRECT) {
841 tcg_gen_movi_i32(cpu_btaken, 1);
842 }
843 dc->jmp = JMP_INDIRECT;
844 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
845 }
846 }
847
848 static void dec_imm(DisasContext *dc)
849 {
850 dc->ext_imm = dc->imm << 16;
851 tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
852 dc->tb_flags |= IMM_FLAG;
853 dc->clear_imm = 0;
854 }
855
856 static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
857 {
858 /* Should be set to true if r1 is used by loadstores. */
859 bool stackprot = false;
860 TCGv_i32 t32;
861
862 /* All load/stores use ra. */
863 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
864 stackprot = true;
865 }
866
867 /* Treat the common cases first. */
868 if (!dc->type_b) {
869 if (ea) {
870 int addr_size = dc->cpu->cfg.addr_size;
871
872 if (addr_size == 32) {
873 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
874 return;
875 }
876
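            /* Extended address: ra supplies the high half, rb the low half. */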
877 tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
878 if (addr_size < 64) {
879 /* Mask off out of range bits. */
880 tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
881 }
882 return;
883 }
884
885 /* If any of the regs is r0, set t to the value of the other reg. */
886 if (dc->ra == 0) {
887 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
888 return;
889 } else if (dc->rb == 0) {
890 tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
891 return;
892 }
893
894 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
895 stackprot = true;
896 }
897
898 t32 = tcg_temp_new_i32();
899 tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
900 tcg_gen_extu_i32_tl(t, t32);
901 tcg_temp_free_i32(t32);
902
903 if (stackprot) {
904 gen_helper_stackprot(cpu_env, t);
905 }
906 return;
907 }
908 /* Immediate. */
909 t32 = tcg_temp_new_i32();
910 tcg_gen_addi_i32(t32, cpu_R[dc->ra], dec_alu_typeb_imm(dc));
911 tcg_gen_extu_i32_tl(t, t32);
912 tcg_temp_free_i32(t32);
913
914 if (stackprot) {
915 gen_helper_stackprot(cpu_env, t);
916 }
917 return;
918 }
919
920 static void dec_load(DisasContext *dc)
921 {
922 TCGv_i32 v;
923 TCGv addr;
924 unsigned int size;
925 bool rev = false, ex = false, ea = false;
926 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
927 MemOp mop;
928
929 mop = dc->opcode & 3;
930 size = 1 << mop;
931 if (!dc->type_b) {
932 ea = extract32(dc->ir, 7, 1);
933 rev = extract32(dc->ir, 9, 1);
934 ex = extract32(dc->ir, 10, 1);
935 }
936 mop |= MO_TE;
937 if (rev) {
938 mop ^= MO_BSWAP;
939 }
940
941 if (trap_illegal(dc, size > 4)) {
942 return;
943 }
944
945 if (trap_userspace(dc, ea)) {
946 return;
947 }
948
949 t_sync_flags(dc);
950 addr = tcg_temp_new();
951 compute_ldst_addr(dc, ea, addr);
952 /* Extended addressing bypasses the MMU. */
953 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
954
955 /*
956 * When doing reverse accesses we need to do two things.
957 *
958 * 1. Reverse the address wrt endianness.
959 * 2. Byteswap the data lanes on the way back into the CPU core.
960 */
961 if (rev && size != 4) {
962 /* Endian-reverse the address held in addr. */
963 switch (size) {
964 case 1:
965 {
966 tcg_gen_xori_tl(addr, addr, 3);
967 break;
968 }
969
970 case 2:
971 /* 00 -> 10
972 10 -> 00. */
973 tcg_gen_xori_tl(addr, addr, 2);
974 break;
975 default:
976 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
977 break;
978 }
979 }
980
981 /* lwx does not throw unaligned access errors, so force alignment */
982 if (ex) {
983 tcg_gen_andi_tl(addr, addr, ~3);
984 }
985
986 /* If we get a fault on a dslot, the jmpstate better be in sync. */
987 sync_jmpstate(dc);
988
989 /* Verify alignment if needed. */
990 /*
991 * Microblaze gives MMU faults priority over faults due to
992 * unaligned addresses. That's why we speculatively do the load
993 * into v. If the load succeeds, we verify alignment of the
994 * address and if that succeeds we write into the destination reg.
995 */
996 v = tcg_temp_new_i32();
997 tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
998
999 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1000 TCGv_i32 t0 = tcg_const_i32(0);
1001 TCGv_i32 treg = tcg_const_i32(dc->rd);
1002 TCGv_i32 tsize = tcg_const_i32(size - 1);
1003
1004 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
1005 gen_helper_memalign(cpu_env, addr, treg, t0, tsize);
1006
1007 tcg_temp_free_i32(t0);
1008 tcg_temp_free_i32(treg);
1009 tcg_temp_free_i32(tsize);
1010 }
1011
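    /* lwx: record the reservation address and loaded value for a later swx. */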
1012 if (ex) {
1013 tcg_gen_mov_tl(cpu_res_addr, addr);
1014 tcg_gen_mov_i32(cpu_res_val, v);
1015 }
1016 if (dc->rd) {
1017 tcg_gen_mov_i32(cpu_R[dc->rd], v);
1018 }
1019 tcg_temp_free_i32(v);
1020
1021 if (ex) { /* lwx */
1022 /* no support for AXI exclusive so always clear C */
1023 tcg_gen_movi_i32(cpu_msr_c, 0);
1024 }
1025
1026 tcg_temp_free(addr);
1027 }
1028
1029 static void dec_store(DisasContext *dc)
1030 {
1031 TCGv addr;
1032 TCGLabel *swx_skip = NULL;
1033 unsigned int size;
1034 bool rev = false, ex = false, ea = false;
1035 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1036 MemOp mop;
1037
1038 mop = dc->opcode & 3;
1039 size = 1 << mop;
1040 if (!dc->type_b) {
1041 ea = extract32(dc->ir, 7, 1);
1042 rev = extract32(dc->ir, 9, 1);
1043 ex = extract32(dc->ir, 10, 1);
1044 }
1045 mop |= MO_TE;
1046 if (rev) {
1047 mop ^= MO_BSWAP;
1048 }
1049
1050 if (trap_illegal(dc, size > 4)) {
1051 return;
1052 }
1053
1054 trap_userspace(dc, ea);
1055
1056 t_sync_flags(dc);
1057 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1058 sync_jmpstate(dc);
1059 /* SWX needs a temp_local. */
1060 addr = ex ? tcg_temp_local_new() : tcg_temp_new();
1061 compute_ldst_addr(dc, ea, addr);
1062 /* Extended addressing bypasses the MMU. */
1063 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
1064
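    /*
     * swx: assume failure (MSR[C] = 1) and clear the flag only if the
     * address matches the reservation and the cmpxchg against the value
     * loaded by lwx succeeds.
     */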
1065 if (ex) { /* swx */
1066 TCGv_i32 tval;
1067
1068 /* swx does not throw unaligned access errors, so force alignment */
1069 tcg_gen_andi_tl(addr, addr, ~3);
1070
1071 tcg_gen_movi_i32(cpu_msr_c, 1);
1072 swx_skip = gen_new_label();
1073 tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_skip);
1074
1075 /*
1076 * Compare the value loaded at lwx with current contents of
1077 * the reserved location.
1078 */
1079 tval = tcg_temp_new_i32();
1080
1081 tcg_gen_atomic_cmpxchg_i32(tval, addr, cpu_res_val,
1082 cpu_R[dc->rd], mem_index,
1083 mop);
1084
1085 tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_skip);
1086 tcg_gen_movi_i32(cpu_msr_c, 0);
1087 tcg_temp_free_i32(tval);
1088 }
1089
1090 if (rev && size != 4) {
1091 /* Endian-reverse the address held in addr. */
1092 switch (size) {
1093 case 1:
1094 {
1095 tcg_gen_xori_tl(addr, addr, 3);
1096 break;
1097 }
1098
1099 case 2:
1100 /* 00 -> 10
1101 10 -> 00. */
1102 /* Force addr into the temp. */
1103 tcg_gen_xori_tl(addr, addr, 2);
1104 break;
1105 default:
1106 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1107 break;
1108 }
1109 }
1110
1111 if (!ex) {
1112 tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
1113 }
1114
1115 /* Verify alignment if needed. */
1116 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
1117 TCGv_i32 t1 = tcg_const_i32(1);
1118 TCGv_i32 treg = tcg_const_i32(dc->rd);
1119 TCGv_i32 tsize = tcg_const_i32(size - 1);
1120
1121 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
1122 /* FIXME: if the alignment is wrong, we should restore the value
1123 * in memory. One possible way to achieve this is to probe
1124 * the MMU prior to the memory access; that way we could put
1125 * the alignment checks in between the probe and the memory
1126 * access.
1127 */
1128 gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
1129
1130 tcg_temp_free_i32(t1);
1131 tcg_temp_free_i32(treg);
1132 tcg_temp_free_i32(tsize);
1133 }
1134
1135 if (ex) {
1136 gen_set_label(swx_skip);
1137 }
1138
1139 tcg_temp_free(addr);
1140 }
1141
1142 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1143 TCGv_i32 d, TCGv_i32 a)
1144 {
1145 static const int mb_to_tcg_cc[] = {
1146 [CC_EQ] = TCG_COND_EQ,
1147 [CC_NE] = TCG_COND_NE,
1148 [CC_LT] = TCG_COND_LT,
1149 [CC_LE] = TCG_COND_LE,
1150 [CC_GE] = TCG_COND_GE,
1151 [CC_GT] = TCG_COND_GT,
1152 };
1153
1154 switch (cc) {
1155 case CC_EQ:
1156 case CC_NE:
1157 case CC_LT:
1158 case CC_LE:
1159 case CC_GE:
1160 case CC_GT:
1161 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
1162 break;
1163 default:
1164 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1165 break;
1166 }
1167 }
1168
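/*
 * Conditionally assign pc: pc_true if the pending branch was taken
 * (cpu_btaken != 0), pc_false otherwise.
 */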
1169 static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
1170 {
1171 TCGv_i32 zero = tcg_const_i32(0);
1172
1173 tcg_gen_movcond_i32(TCG_COND_NE, cpu_pc,
1174 cpu_btaken, zero,
1175 pc_true, pc_false);
1176
1177 tcg_temp_free_i32(zero);
1178 }
1179
1180 static void dec_setup_dslot(DisasContext *dc)
1181 {
1182 TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));
1183
1184 dc->delayed_branch = 2;
1185 dc->tb_flags |= D_FLAG;
1186
1187 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
1188 tcg_temp_free_i32(tmp);
1189 }
1190
1191 static void dec_bcc(DisasContext *dc)
1192 {
1193 unsigned int cc;
1194 unsigned int dslot;
1195
1196 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1197 dslot = dc->ir & (1 << 25);
1198
1199 dc->delayed_branch = 1;
1200 if (dslot) {
1201 dec_setup_dslot(dc);
1202 }
1203
1204 if (dc->type_b) {
1205 dc->jmp = JMP_DIRECT_CC;
1206 dc->jmp_pc = dc->base.pc_next + dec_alu_typeb_imm(dc);
1207 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
1208 } else {
1209 dc->jmp = JMP_INDIRECT;
1210 tcg_gen_addi_i32(cpu_btarget, cpu_R[dc->rb], dc->base.pc_next);
1211 }
1212 eval_cc(dc, cc, cpu_btaken, cpu_R[dc->ra]);
1213 }
1214
1215 static void dec_br(DisasContext *dc)
1216 {
1217 unsigned int dslot, link, abs, mbar;
1218
1219 dslot = dc->ir & (1 << 20);
1220 abs = dc->ir & (1 << 19);
1221 link = dc->ir & (1 << 18);
1222
1223 /* Memory barrier. */
1224 mbar = (dc->ir >> 16) & 31;
1225 if (mbar == 2 && dc->imm == 4) {
1226 uint16_t mbar_imm = dc->rd;
1227
1228 /* Data access memory barrier. */
1229 if ((mbar_imm & 2) == 0) {
1230 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1231 }
1232
1233 /* mbar IMM & 16 decodes to sleep. */
1234 if (mbar_imm & 16) {
1235 TCGv_i32 tmp_1;
1236
1237 if (trap_userspace(dc, true)) {
1238 /* Sleep is a privileged instruction. */
1239 return;
1240 }
1241
1242 t_sync_flags(dc);
1243
1244 tmp_1 = tcg_const_i32(1);
1245 tcg_gen_st_i32(tmp_1, cpu_env,
1246 -offsetof(MicroBlazeCPU, env)
1247 +offsetof(CPUState, halted));
1248 tcg_temp_free_i32(tmp_1);
1249
1250 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
1251
1252 gen_raise_exception(dc, EXCP_HLT);
1253 return;
1254 }
1255 /* Break the TB. */
1256 dc->cpustate_changed = 1;
1257 return;
1258 }
1259
1260 if (abs && link && !dslot) {
1261 if (dc->type_b) {
1262 /* BRKI */
1263 uint32_t imm = dec_alu_typeb_imm(dc);
1264 if (trap_userspace(dc, imm != 8 && imm != 0x18)) {
1265 return;
1266 }
1267 } else {
1268 /* BRK */
1269 if (trap_userspace(dc, true)) {
1270 return;
1271 }
1272 }
1273 }
1274
1275 dc->delayed_branch = 1;
1276 if (dslot) {
1277 dec_setup_dslot(dc);
1278 }
1279 if (link && dc->rd) {
1280 tcg_gen_movi_i32(cpu_R[dc->rd], dc->base.pc_next);
1281 }
1282
1283 if (abs) {
1284 if (dc->type_b) {
1285 uint32_t dest = dec_alu_typeb_imm(dc);
1286
1287 dc->jmp = JMP_DIRECT;
1288 dc->jmp_pc = dest;
1289 tcg_gen_movi_i32(cpu_btarget, dest);
1290 if (link && !dslot) {
1291 switch (dest) {
1292 case 8:
1293 case 0x18:
1294 gen_raise_exception_sync(dc, EXCP_BREAK);
1295 break;
1296 case 0:
1297 gen_raise_exception_sync(dc, EXCP_DEBUG);
1298 break;
1299 }
1300 }
1301 } else {
1302 dc->jmp = JMP_INDIRECT;
1303 tcg_gen_mov_i32(cpu_btarget, cpu_R[dc->rb]);
1304 if (link && !dslot) {
1305 gen_raise_exception_sync(dc, EXCP_BREAK);
1306 }
1307 }
1308 } else if (dc->type_b) {
1309 dc->jmp = JMP_DIRECT;
1310 dc->jmp_pc = dc->base.pc_next + dec_alu_typeb_imm(dc);
1311 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
1312 } else {
1313 dc->jmp = JMP_INDIRECT;
1314 tcg_gen_addi_i32(cpu_btarget, cpu_R[dc->rb], dc->base.pc_next);
1315 }
1316 tcg_gen_movi_i32(cpu_btaken, 1);
1317 }
1318
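/*
 * rtid: re-enable interrupts (MSR[IE]) and restore MSR[UM]/MSR[VM] from
 * the saved copies MSR[UMS]/MSR[VMS] one bit above them; do_rtb and
 * do_rte below follow the same pattern for break and exception returns.
 */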
1319 static inline void do_rti(DisasContext *dc)
1320 {
1321 TCGv_i32 t0, t1;
1322 t0 = tcg_temp_new_i32();
1323 t1 = tcg_temp_new_i32();
1324 tcg_gen_mov_i32(t1, cpu_msr);
1325 tcg_gen_shri_i32(t0, t1, 1);
1326 tcg_gen_ori_i32(t1, t1, MSR_IE);
1327 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1328
1329 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1330 tcg_gen_or_i32(t1, t1, t0);
1331 msr_write(dc, t1);
1332 tcg_temp_free_i32(t1);
1333 tcg_temp_free_i32(t0);
1334 dc->tb_flags &= ~DRTI_FLAG;
1335 }
1336
1337 static inline void do_rtb(DisasContext *dc)
1338 {
1339 TCGv_i32 t0, t1;
1340 t0 = tcg_temp_new_i32();
1341 t1 = tcg_temp_new_i32();
1342 tcg_gen_mov_i32(t1, cpu_msr);
1343 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
1344 tcg_gen_shri_i32(t0, t1, 1);
1345 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1346
1347 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1348 tcg_gen_or_i32(t1, t1, t0);
1349 msr_write(dc, t1);
1350 tcg_temp_free_i32(t1);
1351 tcg_temp_free_i32(t0);
1352 dc->tb_flags &= ~DRTB_FLAG;
1353 }
1354
1355 static inline void do_rte(DisasContext *dc)
1356 {
1357 TCGv_i32 t0, t1;
1358 t0 = tcg_temp_new_i32();
1359 t1 = tcg_temp_new_i32();
1360
1361 tcg_gen_mov_i32(t1, cpu_msr);
1362 tcg_gen_ori_i32(t1, t1, MSR_EE);
1363 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1364 tcg_gen_shri_i32(t0, t1, 1);
1365 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1366
1367 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1368 tcg_gen_or_i32(t1, t1, t0);
1369 msr_write(dc, t1);
1370 tcg_temp_free_i32(t1);
1371 tcg_temp_free_i32(t0);
1372 dc->tb_flags &= ~DRTE_FLAG;
1373 }
1374
1375 static void dec_rts(DisasContext *dc)
1376 {
1377 unsigned int b_bit, i_bit, e_bit;
1378
1379 i_bit = dc->ir & (1 << 21);
1380 b_bit = dc->ir & (1 << 22);
1381 e_bit = dc->ir & (1 << 23);
1382
1383 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1384 return;
1385 }
1386
1387 dec_setup_dslot(dc);
1388
1389 if (i_bit) {
1390 dc->tb_flags |= DRTI_FLAG;
1391 } else if (b_bit) {
1392 dc->tb_flags |= DRTB_FLAG;
1393 } else if (e_bit) {
1394 dc->tb_flags |= DRTE_FLAG;
1395 }
1396
1397 dc->jmp = JMP_INDIRECT;
1398 tcg_gen_movi_i32(cpu_btaken, 1);
1399 tcg_gen_add_i32(cpu_btarget, cpu_R[dc->ra], *dec_alu_op_b(dc));
1400 }
1401
1402 static int dec_check_fpuv2(DisasContext *dc)
1403 {
1404 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1405 gen_raise_hw_excp(dc, ESR_EC_FPU);
1406 }
1407 return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
1408 }
1409
1410 static void dec_fpu(DisasContext *dc)
1411 {
1412 unsigned int fpu_insn;
1413
1414 if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
1415 return;
1416 }
1417
1418 fpu_insn = (dc->ir >> 7) & 7;
1419
1420 switch (fpu_insn) {
1421 case 0:
1422 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1423 cpu_R[dc->rb]);
1424 break;
1425
1426 case 1:
1427 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1428 cpu_R[dc->rb]);
1429 break;
1430
1431 case 2:
1432 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1433 cpu_R[dc->rb]);
1434 break;
1435
1436 case 3:
1437 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1438 cpu_R[dc->rb]);
1439 break;
1440
1441 case 4:
1442 switch ((dc->ir >> 4) & 7) {
1443 case 0:
1444 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1445 cpu_R[dc->ra], cpu_R[dc->rb]);
1446 break;
1447 case 1:
1448 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1449 cpu_R[dc->ra], cpu_R[dc->rb]);
1450 break;
1451 case 2:
1452 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1453 cpu_R[dc->ra], cpu_R[dc->rb]);
1454 break;
1455 case 3:
1456 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1457 cpu_R[dc->ra], cpu_R[dc->rb]);
1458 break;
1459 case 4:
1460 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1461 cpu_R[dc->ra], cpu_R[dc->rb]);
1462 break;
1463 case 5:
1464 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1465 cpu_R[dc->ra], cpu_R[dc->rb]);
1466 break;
1467 case 6:
1468 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1469 cpu_R[dc->ra], cpu_R[dc->rb]);
1470 break;
1471 default:
1472 qemu_log_mask(LOG_UNIMP,
1473 "unimplemented fcmp fpu_insn=%x pc=%x"
1474 " opc=%x\n",
1475 fpu_insn, (uint32_t)dc->base.pc_next,
1476 dc->opcode);
1477 dc->abort_at_next_insn = 1;
1478 break;
1479 }
1480 break;
1481
1482 case 5:
1483 if (!dec_check_fpuv2(dc)) {
1484 return;
1485 }
1486 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1487 break;
1488
1489 case 6:
1490 if (!dec_check_fpuv2(dc)) {
1491 return;
1492 }
1493 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1494 break;
1495
1496 case 7:
1497 if (!dec_check_fpuv2(dc)) {
1498 return;
1499 }
1500 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1501 break;
1502
1503 default:
1504 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1505 " opc=%x\n",
1506 fpu_insn, (uint32_t)dc->base.pc_next, dc->opcode);
1507 dc->abort_at_next_insn = 1;
1508 break;
1509 }
1510 }
1511
1512 static void dec_null(DisasContext *dc)
1513 {
1514 if (trap_illegal(dc, true)) {
1515 return;
1516 }
1517 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
1518 (uint32_t)dc->base.pc_next, dc->opcode);
1519 dc->abort_at_next_insn = 1;
1520 }
1521
1522 /* Insns connected to FSL or AXI stream attached devices. */
1523 static void dec_stream(DisasContext *dc)
1524 {
1525 TCGv_i32 t_id, t_ctrl;
1526 int ctrl;
1527
1528 if (trap_userspace(dc, true)) {
1529 return;
1530 }
1531
1532 t_id = tcg_temp_new_i32();
1533 if (dc->type_b) {
1534 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
1535 ctrl = dc->imm >> 10;
1536 } else {
1537 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
1538 ctrl = dc->imm >> 5;
1539 }
1540
1541 t_ctrl = tcg_const_i32(ctrl);
1542
1543 if (dc->rd == 0) {
1544 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1545 } else {
1546 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1547 }
1548 tcg_temp_free_i32(t_id);
1549 tcg_temp_free_i32(t_ctrl);
1550 }
1551
1552 static struct decoder_info {
1553 struct {
1554 uint32_t bits;
1555 uint32_t mask;
1556 };
1557 void (*dec)(DisasContext *dc);
1558 } decinfo[] = {
1559 {DEC_BIT, dec_bit},
1560 {DEC_BARREL, dec_barrel},
1561 {DEC_LD, dec_load},
1562 {DEC_ST, dec_store},
1563 {DEC_IMM, dec_imm},
1564 {DEC_BR, dec_br},
1565 {DEC_BCC, dec_bcc},
1566 {DEC_RTS, dec_rts},
1567 {DEC_FPU, dec_fpu},
1568 {DEC_DIV, dec_div},
1569 {DEC_MSR, dec_msr},
1570 {DEC_STREAM, dec_stream},
1571 {{0, 0}, dec_null}
1572 };
1573
1574 static void old_decode(DisasContext *dc, uint32_t ir)
1575 {
1576 int i;
1577
1578 dc->ir = ir;
1579
1580 /* Bit 29 (bit 2 in MicroBlaze's MSB-first numbering) selects type B (immediate) insns. */
1581 dc->type_b = ir & (1 << 29);
1582
1583 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1584 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1585 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1586 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1587 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1588
1589 /* Large switch for all insns. */
1590 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1591 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1592 decinfo[i].dec(dc);
1593 break;
1594 }
1595 }
1596 }
1597
1598 static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
1599 {
1600 DisasContext *dc = container_of(dcb, DisasContext, base);
1601 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1602 int bound;
1603
1604 dc->cpu = cpu;
1605 dc->synced_flags = dc->tb_flags = dc->base.tb->flags;
1606 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1607 dc->jmp = dc->delayed_branch ? JMP_INDIRECT : JMP_NOJMP;
1608 dc->cpustate_changed = 0;
1609 dc->abort_at_next_insn = 0;
1610 dc->ext_imm = dc->base.tb->cs_base;
1611 dc->r0 = NULL;
1612 dc->r0_set = false;
1613
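    /* Cap max_insns so that the TB does not cross a target page boundary. */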
1614 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
1615 dc->base.max_insns = MIN(dc->base.max_insns, bound);
1616 }
1617
1618 static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
1619 {
1620 }
1621
1622 static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
1623 {
1624 tcg_gen_insn_start(dcb->pc_next);
1625 }
1626
1627 static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
1628 const CPUBreakpoint *bp)
1629 {
1630 DisasContext *dc = container_of(dcb, DisasContext, base);
1631
1632 gen_raise_exception_sync(dc, EXCP_DEBUG);
1633
1634 /*
1635 * The address covered by the breakpoint must be included in
1636 * [tb->pc, tb->pc + tb->size) in order for it to be
1637 * properly cleared -- thus we increment the PC here so that
1638 * the logic setting tb->size below does the right thing.
1639 */
1640 dc->base.pc_next += 4;
1641 return true;
1642 }
1643
1644 static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
1645 {
1646 DisasContext *dc = container_of(dcb, DisasContext, base);
1647 CPUMBState *env = cs->env_ptr;
1648 uint32_t ir;
1649
1650 /* TODO: This should raise an exception, not terminate qemu. */
1651 if (dc->base.pc_next & 3) {
1652 cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
1653 (uint32_t)dc->base.pc_next);
1654 }
1655
1656 dc->clear_imm = 1;
1657 ir = cpu_ldl_code(env, dc->base.pc_next);
1658 if (!decode(dc, ir)) {
1659 old_decode(dc, ir);
1660 }
1661
1662 if (dc->r0) {
1663 tcg_temp_free_i32(dc->r0);
1664 dc->r0 = NULL;
1665 dc->r0_set = false;
1666 }
1667
1668 if (dc->clear_imm && (dc->tb_flags & IMM_FLAG)) {
1669 dc->tb_flags &= ~IMM_FLAG;
1670 tcg_gen_discard_i32(cpu_imm);
1671 }
1672 dc->base.pc_next += 4;
1673
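    /*
     * delayed_branch is 2 while translating a branch with a delay slot and
     * 1 for one without; when the countdown hits 0, the branch (and any
     * pending rtid/rtbd/rted state) resolves here.
     */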
1674 if (dc->delayed_branch && --dc->delayed_branch == 0) {
1675 if (dc->tb_flags & DRTI_FLAG) {
1676 do_rti(dc);
1677 }
1678 if (dc->tb_flags & DRTB_FLAG) {
1679 do_rtb(dc);
1680 }
1681 if (dc->tb_flags & DRTE_FLAG) {
1682 do_rte(dc);
1683 }
1684 /* Clear the delay slot flag. */
1685 dc->tb_flags &= ~D_FLAG;
1686 dc->base.is_jmp = DISAS_JUMP;
1687 }
1688
1689 /* Force an exit if the per-tb cpu state has changed. */
1690 if (dc->base.is_jmp == DISAS_NEXT && dc->cpustate_changed) {
1691 dc->base.is_jmp = DISAS_UPDATE;
1692 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
1693 }
1694 }
1695
1696 static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
1697 {
1698 DisasContext *dc = container_of(dcb, DisasContext, base);
1699
1700 assert(!dc->abort_at_next_insn);
1701
1702 if (dc->base.is_jmp == DISAS_NORETURN) {
1703 /* We have already exited the TB. */
1704 return;
1705 }
1706
1707 t_sync_flags(dc);
1708 if (dc->tb_flags & D_FLAG) {
1709 sync_jmpstate(dc);
1710 dc->jmp = JMP_NOJMP;
1711 }
1712
1713 switch (dc->base.is_jmp) {
1714 case DISAS_TOO_MANY:
1715 assert(dc->jmp == JMP_NOJMP);
1716 gen_goto_tb(dc, 0, dc->base.pc_next);
1717 return;
1718
1719 case DISAS_UPDATE:
1720 assert(dc->jmp == JMP_NOJMP);
1721 if (unlikely(cs->singlestep_enabled)) {
1722 gen_raise_exception(dc, EXCP_DEBUG);
1723 } else {
1724 tcg_gen_exit_tb(NULL, 0);
1725 }
1726 return;
1727
1728 case DISAS_JUMP:
1729 switch (dc->jmp) {
1730 case JMP_INDIRECT:
1731 {
1732 TCGv_i32 tmp_pc = tcg_const_i32(dc->base.pc_next);
1733 eval_cond_jmp(dc, cpu_btarget, tmp_pc);
1734 tcg_temp_free_i32(tmp_pc);
1735
1736 if (unlikely(cs->singlestep_enabled)) {
1737 gen_raise_exception(dc, EXCP_DEBUG);
1738 } else {
1739 tcg_gen_exit_tb(NULL, 0);
1740 }
1741 }
1742 return;
1743
1744 case JMP_DIRECT_CC:
1745 {
1746 TCGLabel *l1 = gen_new_label();
1747 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_btaken, 0, l1);
1748 gen_goto_tb(dc, 1, dc->base.pc_next);
1749 gen_set_label(l1);
1750 }
1751 /* fall through */
1752
1753 case JMP_DIRECT:
1754 gen_goto_tb(dc, 0, dc->jmp_pc);
1755 return;
1756 }
1757 /* fall through */
1758
1759 default:
1760 g_assert_not_reached();
1761 }
1762 }
1763
1764 static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
1765 {
1766 qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
1767 log_target_disas(cs, dcb->pc_first, dcb->tb->size);
1768 }
1769
1770 static const TranslatorOps mb_tr_ops = {
1771 .init_disas_context = mb_tr_init_disas_context,
1772 .tb_start = mb_tr_tb_start,
1773 .insn_start = mb_tr_insn_start,
1774 .breakpoint_check = mb_tr_breakpoint_check,
1775 .translate_insn = mb_tr_translate_insn,
1776 .tb_stop = mb_tr_tb_stop,
1777 .disas_log = mb_tr_disas_log,
1778 };
1779
1780 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
1781 {
1782 DisasContext dc;
1783 translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
1784 }
1785
1786 void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1787 {
1788 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1789 CPUMBState *env = &cpu->env;
1790 int i;
1791
1792 if (!env) {
1793 return;
1794 }
1795
1796 qemu_fprintf(f, "IN: PC=%x %s\n",
1797 env->pc, lookup_symbol(env->pc));
1798 qemu_fprintf(f, "rmsr=%x resr=%x rear=%" PRIx64 " "
1799 "imm=%x iflags=%x fsr=%x rbtr=%x\n",
1800 env->msr, env->esr, env->ear,
1801 env->imm, env->iflags, env->fsr, env->btr);
1802 qemu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1803 env->btaken, env->btarget,
1804 (env->msr & MSR_UM) ? "user" : "kernel",
1805 (env->msr & MSR_UMS) ? "user" : "kernel",
1806 (bool)(env->msr & MSR_EIP),
1807 (bool)(env->msr & MSR_IE));
1808 for (i = 0; i < 12; i++) {
1809 qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
1810 if ((i + 1) % 4 == 0) {
1811 qemu_fprintf(f, "\n");
1812 }
1813 }
1814
1815 /* Registers that aren't modeled are reported as 0 */
1816 qemu_fprintf(f, "redr=%x rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
1817 "rtlblo=0 rtlbhi=0\n", env->edr);
1818 qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
1819 for (i = 0; i < 32; i++) {
1820 qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1821 if ((i + 1) % 4 == 0)
1822 qemu_fprintf(f, "\n");
1823 }
1824 qemu_fprintf(f, "\n\n");
1825 }
1826
1827 void mb_tcg_init(void)
1828 {
1829 #define R(X) { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
1830 #define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }
1831
1832 static const struct {
1833 TCGv_i32 *var; int ofs; char name[8];
1834 } i32s[] = {
1835 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
1836 R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
1837 R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
1838 R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
1839
1840 SP(pc),
1841 SP(msr),
1842 SP(msr_c),
1843 SP(imm),
1844 SP(iflags),
1845 SP(btaken),
1846 SP(btarget),
1847 SP(res_val),
1848 };
1849
1850 #undef R
1851 #undef SP
1852
1853 for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
1854 *i32s[i].var =
1855 tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
1856 }
1857
1858 cpu_res_addr =
1859 tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
1860 }
1861
1862 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1863 target_ulong *data)
1864 {
1865 env->pc = data[0];
1866 }