/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
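/*
 * For example, EXTRACT_FIELD(insn, 21, 25) yields bits insn[25:21]
 * right-justified, i.e. (insn >> 21) & 0x1f.
 */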

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

/* This is the state at translation time. */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* TCG op of the current insn_start. */
    TCGOp *insn_start;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder. */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}
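
/*
 * For example, an "imm 0x1234" prefix sets ext_imm to 0x12340000; a
 * following type-B insn whose 16-bit immediate field is 0x5678 then
 * operates on the combined 32-bit immediate 0x12345678.  Without the
 * prefix, the decoder's 16-bit value is used as-is.
 */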

/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Sync the TB-dependent flags between the translator and runtime. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}

#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }

/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}
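
/*
 * The two add2 steps compute (ina + carry-in) and then (+ inb) as
 * 64-bit carry:sum pairs.  If the first step carries, its 32-bit sum
 * is zero, so the second step cannot carry as well; cpu_msr_c is thus
 * the single carry-out of the full ina + inb + carry-in addition.
 */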

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}

static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
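
/*
 * Note that the deposit above both reads and writes "out": bsifi
 * merges the low "width" bits of ra into rd at bit imm_s while
 * preserving rd's remaining bits.
 */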

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

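/*
 * cmp/cmpu thus produce rb - ra with the sign bit replaced by the
 * exact "rb < ra" result (signed resp. unsigned), so a conditional
 * branch on rd's sign stays correct even when the subtraction
 * overflows.
 */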
DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

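/*
 * The muls2/mulu2/mulsu2 ops above produce the full 64-bit product as
 * a (low, high) pair; mulh/mulhu/mulhsu keep only the high half in rd
 * and discard the low half (tmp).
 */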
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}
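
/*
 * MicroBlaze reverse-subtract is rb + ~ra + carry-in (equal to
 * rb - ra when carry-in is 1), with MSR[C] receiving the carry-out.
 * For gen_rsub the implicit carry-in of 1 makes that carry-out equal
 * to the unsigned "rb >= ra", hence the GEU setcond above; gen_rsubc
 * instead chains the real carry through two add2 steps.
 */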

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}
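
/*
 * extract2(out, ina, tmp, 1) takes bits [32:1] of the 64-bit pair
 * tmp:ina, i.e. it shifts ina right by one and inserts the old carry
 * (saved in tmp) as the new bit 31, which is exactly shift-right
 * through carry.
 */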

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If either reg is r0, set ret to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

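    /*
     * r1 is the ABI stack pointer; with stack protection configured,
     * addresses computed from it are range-checked against the
     * SLR/SHR limit registers by the helper below.
     */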
    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If ra is r0, the address is just the immediate. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits. */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif

#ifndef CONFIG_USER_ONLY
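/*
 * Accesses that may fault on misalignment record the access details in
 * the iflags word attached to this insn's insn_start: ESR_ESS_FLAG
 * plus rd in bits [9:5] and the S (store) and W (word) bits.  The
 * exception path can then synthesize ESR without re-decoding the insn.
 */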
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
#endif

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }
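
    /*
     * For example, a reversed halfword access at byte offset 0 is
     * redirected to offset 2 (addr ^ 2) and its two bytes are swapped,
     * giving a little-endian view of the big-endian bus.
     */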

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment. */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C. */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}

static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment. */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}

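/*
 * A branch with a delay slot sets D_FLAG, so the next insn is
 * translated as the slot insn.  BIMM_FLAG additionally records that
 * the branch itself was imm-prefixed, letting the exception path back
 * up over both the branch and its prefix when the slot insn faults.
 */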
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}

static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget. */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg) \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); } \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg) \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget. */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget. */
    zero = tcg_constant_i32(0);
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}

#define DO_BCC(NAME, COND) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); } \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); } \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); } \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}

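/*
 * The mbar immediate selects the barrier flavor: bit 1 clear requests
 * a data-side barrier, bit 0 clear an instruction-side barrier, and
 * bit 4 turns the insn into the privileged "sleep" form (see below).
 */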
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}

static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)

static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap. */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}
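
/*
 * Multiplying the 0/1 value of cpu_msr_c by (MSR_C | MSR_CC) sets both
 * the architectural carry bit and its read-only copy in one operation.
 */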

static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}

static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}

static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C. */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}

static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}

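/*
 * For the rti/rtb/rte return insns, MSR[UMS,VMS] hold the saved user
 * and virtual-protected modes one bit above MSR[UM,VM]; the shri by 1
 * below moves the saved copies back into the active bits, while each
 * variant also restores the event mask it returns from (IE for rti,
 * BIP for rtb, EE/EIP for rte).
 */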
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

/* Insns connected to FSL or AXI stream attached devices. */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}

static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}

static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}

static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

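    /*
     * Cap the TB so it cannot cross a page: -(pc | TARGET_PAGE_MASK)
     * is the number of bytes from pc_first to the end of its page,
     * and each insn is 4 bytes.
     */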
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}

static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from (rti/rtb/rte) branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled). */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

static void mb_tr_disas_log(const DisasContextBase *dcb,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
    target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start = mb_tr_tb_start,
    .insn_start = mb_tr_insn_start,
    .translate_insn = mb_tr_translate_insn,
    .tb_stop = mb_tr_tb_stop,
    .disas_log = mb_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}

void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
        R(1), R(2), R(3), R(4), R(5), R(6), R(7),
        R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}