/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
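
/*
 * For illustration: bit positions count from 0 at the LSB and both
 * bounds are inclusive, so e.g. EXTRACT_FIELD(0xABCD, 8, 11) yields
 * 0xB (bits 8..11 of the source).
 */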

/* is_jmp field values */
#define DISAS_JUMP  DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT  DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time. */
typedef struct DisasContext {
    DisasContextBase base;
    MicroBlazeCPU *cpu;

    /* TCG op of the current insn_start. */
    TCGOp *insn_start;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder. */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}
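
/*
 * A worked example of the imm-prefix mechanism above: after "imm 0x1234",
 * dc->ext_imm holds 0x12340000 and IMM_FLAG is set, so the 16-bit field
 * of a following "addik rD, rA, 0x5678" combines via
 * deposit32(0x12340000, 0, 16, 0x5678) == 0x12345678.  Without a prefix,
 * the sign-extended 16-bit value is used directly.
 */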

/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
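
/*
 * Roughly: direct TB chaining via tcg_gen_goto_tb() is only valid while
 * the destination lies on the same guest page that the TB started on,
 * since remapping another page must force a fresh TB lookup.  User-mode
 * emulation has no MMU remapping to worry about, so it can always chain.
 */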

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (dc->base.singlestep_enabled) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
        tcg_gen_movi_i32(cpu_pc, dest);
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cpu->cfg.illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_const_i32(arg->imm);

    fn(rd, ra, imm);

    tcg_temp_free_i32(imm);
    return true;
}

#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cpu->cfg.CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cpu->cfg.CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cpu->cfg.CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }

/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}
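
/*
 * A note on the carry plumbing in gen_addc: the first add2 leaves the
 * carry-out of (ina + carry-in) in cpu_msr_c, and the second add2 folds
 * that bit into its own high half while adding inb.  For 32-bit inputs
 * the two partial carries can never both be 1, so their sum is still a
 * valid 0/1 carry flag.
 */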

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}
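
/*
 * In both comparisons above the result is inb - ina with bit 31
 * overwritten by the signed/unsigned "ina > inb" test: e.g. after
 * "cmpu rD, rA, rB", rD's sign bit is set exactly when rA > rB as
 * unsigned values, which is what conditional branches on rD then test.
 */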

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}
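
/*
 * In gen_src above, tcg_gen_extract2_i32(out, ina, tmp, 1) extracts bits
 * [32:1] of the 64-bit concatenation tmp:ina, i.e. ina shifted right by
 * one with the old carry (saved in tmp) entering at bit 31 -- the
 * MicroBlaze "shift right with carry" semantics.
 */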

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If either of the regs is r0, set ret to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cpu->cfg.stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If the reg is r0, set ret to just the immediate. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cpu->cfg.stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cpu->cfg.addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits. */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif

static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
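
/*
 * The details of a potentially-unaligned access are smuggled to the
 * runtime through the second insn_start parameter: ESR_ESS_FLAG marks
 * the insn as a load/store, the destination register is shifted into
 * the ESR Rx position (bits 9:5 here), and ESR_S/ESR_W record
 * store-vs-load and word-vs-halfword, matching what the unaligned
 * access exception handler expects to find in ESR.
 */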

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }
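
    /*
     * For example, a reversed halfword access (size == MO_16) XORs the
     * address with 3 - 1 = 2, swapping the two halfword slots of an
     * aligned word, while a reversed byte access XORs with 3, mapping
     * byte offsets 0,1,2,3 to 3,2,1,0.  Word accesses need only the
     * MO_BSWAP flip.
     */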

    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cpu->cfg.unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}

static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}

static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cpu->cfg.unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
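
/*
 * A note on the lwx/swx pair above: the guest's load-and-reserve /
 * store-conditional is modelled with a plain compare-and-swap, so swx
 * succeeds iff the reserved word still holds the value lwx loaded.
 * That is slightly weaker than a true reservation (an ABA change goes
 * undetected) but is the usual TCG idiom for exclusive pairs.
 */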

static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}

static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget. */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget. */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget. */
    zero = tcg_const_i32(0);
    next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(next);

    return true;
}
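
/*
 * After the movcond above, btarget always holds the final destination:
 * the taken target when "ra cond 0" holds, otherwise the fall-through
 * pc (pc_next + 8 with a delay slot, pc_next + 4 without).  The TB
 * epilogue can then either branch on the cached bvalue for a direct
 * jump or use btarget as-is for the indirect case.
 */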

#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}

static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        TCGv_i32 tmp_1;

        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        tmp_1 = tcg_const_i32(1);
        tcg_gen_st_i32(tmp_1, cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));
        tcg_temp_free_i32(tmp_1);

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}

static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)

static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap. */
    if (dc->cpu->cfg.opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
    tcg_temp_free_i32(t);
}
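
/*
 * Since cpu_msr_c is kept strictly 0 or 1, the multiply above
 * broadcasts the carry into both MSR_C and its copy MSR_CC in a
 * single TCG op, with no conditional needed.
 */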

static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}

static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}

static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C. */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}

static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
                tcg_temp_free_i64(t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
            tcg_temp_free_i64(t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(CPUMBState, pvr.regs[arg->rs - 0x2000]));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}

static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}
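
/*
 * All three return paths above use the same MSR convention: the saved
 * copies of UM and VM (UMS/VMS) sit one bit above the live flags, so
 * shifting MSR right by one moves the saved values into the live
 * positions.  The per-path masking then re-enables interrupts (rti),
 * clears break-in-progress (rtb), or clears exception-in-progress
 * while re-enabling exceptions (rte).
 */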

/* Insns connected to FSL or AXI stream attached devices. */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}

static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}

static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}

static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cpu = cpu;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
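
/*
 * The bound computed above is the number of whole insns left on the
 * current guest page: -(pc | TARGET_PAGE_MASK) is the byte distance
 * from pc to the next page boundary (e.g. pc 0x1ff8 on a 4K page
 * gives 8, i.e. 2 insns), and capping max_insns there keeps a TB
 * from straddling a page boundary.
 */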

static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}

static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
                                   const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    gen_raise_exception_sync(dc, EXCP_DEBUG);

    /*
     * The address covered by the breakpoint must be included in
     * [tb->pc, tb->pc + tb->size) in order for it to be properly
     * cleared -- thus we increment the PC here so that the logic
     * setting tb->size below does the right thing.
     */
    dc->base.pc_next += 4;
    return true;
}

static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        tcg_temp_free_i32(dc->r0);
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from-interrupt/break/exception branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !cs->singlestep_enabled) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ singlestep) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);

        if (unlikely(cs->singlestep_enabled)) {
            gen_raise_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
{
    qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
    log_target_disas(cs, dcb->pc_first, dcb->tb->size);
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .breakpoint_check   = mb_tr_breakpoint_check,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 12; i++) {
        qemu_fprintf(f, "rpvr%-2d=%08x%c",
                     i, env->pvr.regs[i], i % 4 == 3 ? '\n' : ' ');
    }

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}

void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->iflags = data[1];
}