]> git.proxmox.com Git - mirror_qemu.git/blame - target/microblaze/translate.c
target/microblaze: Convert dec_rts to decodetree
[mirror_qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
dcb32f1d 25#include "tcg/tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
4acb54ba 27#include "microblaze-decode.h"
f08b6170 28#include "exec/cpu_ldst.h"
2ef6175a 29#include "exec/helper-gen.h"
77fc6f5e 30#include "exec/translator.h"
90c84c56 31#include "qemu/qemu-print.h"
4acb54ba 32
a7e30d84 33#include "trace-tcg.h"
508127e2 34#include "exec/log.h"
a7e30d84 35
4acb54ba
EI
/*
 * Extract the inclusive bit field [start, end] (LSB-first numbering)
 * from src.  All parameters are parenthesized so that arbitrary
 * expressions may be passed as arguments without precedence surprises.
 * NOTE(review): a full 32-bit field would shift 1 by 32 (UB); callers
 * only extract sub-word fields.
 */
#define EXTRACT_FIELD(src, start, end) \
    (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
38
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */

/* Translator-global TCG values (initialized elsewhere in this file). */
static TCGv_i32 cpu_R[32];      /* general purpose registers; r0 reads as 0 */
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;      /* carry bit, kept separate from cpu_msr */
static TCGv_i32 cpu_imm;        /* latched IMM-prefix value (see trans_imm) */
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;    /* branch-taken destination */
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;       /* lwx/swx reservation address */
static TCGv_i32 cpu_res_val;    /* lwx/swx reservation value */
4acb54ba 53
022c62cb 54#include "exec/gen-icount.h"
4acb54ba
EI
55
/* This is the state at translation time. */
typedef struct DisasContext {
    DisasContextBase base;
    MicroBlazeCPU *cpu;

    /* TCG op of the current insn_start. */
    TCGOp *insn_start;

    /* Lazily allocated scratch standing in for r0 (see reg_for_read). */
    TCGv_i32 r0;
    bool r0_set;            /* r0 currently holds the value 0 */

    /* Decoder. */
    int type_b;             /* insn is type B (register + immediate) */
    uint32_t ir;            /* raw instruction word */
    uint32_t ext_imm;       /* upper bits supplied by a preceding IMM */
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int tb_flags;          /* flags in effect for this insn */
    unsigned int tb_flags_to_set;   /* flags for the *next* insn */
    int mem_index;                  /* current MMU index for loads/stores */

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;

    int abort_at_next_insn;
} DisasContext;
88
20800179
RH
89static int typeb_imm(DisasContext *dc, int x)
90{
91 if (dc->tb_flags & IMM_FLAG) {
92 return deposit32(dc->ext_imm, 0, 16, x);
93 }
94 return x;
95}
96
44d1432b
RH
97/* Include the auto-generated decoder. */
98#include "decode-insns.c.inc"
99
/* Write back translation-time iflags to the CPU state if they differ. */
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & ~MSR_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & ~MSR_TB_MASK);
    }
}
107
/* Raise exception @index at runtime; the TB ends here (no return). */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->base.is_jmp = DISAS_NORETURN;
}
116
/*
 * As gen_raise_exception, but first synchronize iflags and pc so the
 * exception handler observes a consistent CPU state.
 */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
123
/* Raise a hardware exception with exception-status-register cause @esr_ec. */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
132
/*
 * Direct TB chaining is only valid when the destination lies in the same
 * guest page as the TB start (softmmu); user mode has no such restriction.
 */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
141
/*
 * Jump to @dest, chaining to TB slot @n when possible.  Under single-step
 * we instead raise EXCP_DEBUG at the destination.  Ends the TB.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (dc->base.singlestep_enabled) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
        tcg_gen_movi_i32(cpu_pc, dest);
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        /* Cross-page target: exit without chaining. */
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
159
/*
 * Returns true if the insn an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    /* The exception is only delivered when MSR[EE] is set and the CPU
       is configured to trap illegal opcodes. */
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cpu->cfg.illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
172
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    /* Privileged-insn trap is gated on MSR[EE]. */
    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}
186
/* Sign-extend and (if prefixed by IMM) widen the type-B immediate. */
static int32_t dec_alu_typeb_imm(DisasContext *dc)
{
    tcg_debug_assert(dc->type_b);
    return typeb_imm(dc, (int16_t)dc->imm);
}
192
/*
 * Return the second ALU operand: the materialized immediate for type-B
 * insns (loaded into the cpu_imm global), otherwise register rb.
 */
static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        tcg_gen_movi_i32(cpu_imm, dec_alu_typeb_imm(dc));
        return &cpu_imm;
    }
    return &cpu_R[dc->rb];
}
201
/*
 * Return a TCG value holding register @reg for reading.  r0 always reads
 * as 0: a scratch temp is lazily allocated and zeroed once per insn
 * (r0_set tracks whether it currently holds 0).
 */
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}
4acb54ba 216
20800179
RH
217static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
218{
219 if (likely(reg != 0)) {
220 return cpu_R[reg];
221 }
222 if (dc->r0 == NULL) {
223 dc->r0 = tcg_temp_new_i32();
224 }
225 return dc->r0;
226}
4acb54ba 227
/*
 * Generic emitter for type-A (register-register) insns.
 * When @side_effects is false and rd is r0 the insn is a nop and no
 * code is emitted; otherwise @fn emits rd = fn(ra, rb).
 * Always returns true (insn recognized).
 */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}
243
/*
 * Emitter for single-source-register insns (rd = fn(ra)).
 * As with do_typea, a write to r0 without side effects is a nop.
 */
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}
258
/*
 * Emitter for type-B insns whose TCG op takes the immediate directly
 * (rd = fni(ra, imm)).  The immediate was already combined with any
 * IMM prefix by the decoder.
 */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}
273
/*
 * Emitter for type-B insns whose TCG op needs the immediate as a value
 * operand: the immediate is materialized in a temp, passed to @fn, and
 * freed afterwards.
 */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_const_i32(arg->imm);

    fn(rd, ra, imm);

    tcg_temp_free_i32(imm);
    return true;
}
292
/*
 * Boilerplate generators for decodetree trans_* hooks.
 * SE = side_effects flag (see do_typea); FN/FNI = TCG emitter.
 * The _CFG variants additionally gate the insn on a CPU configuration
 * field, returning false (unrecognized) when the feature is absent.
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cpu->cfg.CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cpu->cfg.CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cpu->cfg.CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrap a helper that takes cpu_env as a plain 1- or 2-operand emitter. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }
328
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    /* Double-word add: the high half (cpu_msr_c) receives the carry. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* tmp = ina + C, then out = tmp + inb; carries from both steps
       accumulate into cpu_msr_c. */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

/* side_effects = true for all variants that write the carry flag. */
DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
368
/* andni: and with the bitwise complement of the immediate. */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
378
/* Barrel shifter, register-count forms: only the low 5 bits of rb count. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}
402
/* Bit-field extract immediate: rd = ina[imm_s + imm_w - 1 : imm_s]. */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior; out is left untouched. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
417
/* Bit-field insert immediate: rd[imm_w : imm_s] = ina[imm_w - imm_s : 0]. */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior; out is left untouched. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

/* All barrel-shift insns require the use_barrel configuration option. */
DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
444
/* Count leading zeros; a zero input yields 32. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
451
/* Signed compare: rd = rb - ra, with bit 31 replaced by (rb < ra). */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

/* Unsigned compare: as gen_cmp, but with an unsigned less-than. */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
a2b0b90e 474
/*
 * FPU insns, forwarded to out-of-line helpers via cpu_env.
 * All are marked side-effecting (SE = true) so they execute even when
 * rd is r0.  flt/fint/fsqrt require the extended FPU (use_fpu >= 2).
 */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
506
/* Does not use ENV_WRAPPER3, because arguments are swapped as well:
   idiv computes rb / ra (dividend in rb, divisor in ra). */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
520
/*
 * IMM prefix: latch the upper 16 bits for the following type-B insn.
 * IMM_FLAG goes through tb_flags_to_set so it takes effect only for
 * the next insn.
 */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
528
/* High half of signed * signed; the low half is discarded. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High half of unsigned * unsigned. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High half of signed * unsigned. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* The high-half forms require the full multiplier (use_hw_mul >= 2). */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
555
DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

/* pcmpeq/pcmpne: rd = (ra == rb) resp. (ra != rb), as 0/1. */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
572
/* Reverse subtract: rd = rb - ra.  No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* C = !borrow = (inb >= ina unsigned). */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* rb - ra + C - 1, computed as rb + ~ra + C with carry tracking. */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)
621
DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

/* Arithmetic shift right by one; shifted-out bit goes to C. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* Shift right through carry: out = old_C:ina >> 1, new C = ina[0]. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Save the incoming carry before clobbering it. */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}

/* Logical shift right by one; shifted-out bit goes to C. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

/* Swap halfwords: a rotate by 16 bits. */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)
659
static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
669
/*
 * Compute the effective address ra + rb as a target-sized value,
 * treating r0 as 0.  Accesses involving r1 are checked by the stack
 * protector helper when that feature is configured.
 * The caller owns (and must free) the returned temp.
 */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cpu->cfg.stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
693
/*
 * Compute the effective address ra + imm as a target-sized value,
 * treating r0 as 0.  Stack-protector check applies to r1-based accesses.
 * The caller owns (and must free) the returned temp.
 */
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cpu->cfg.stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
713
/*
 * Compute the extended address for the *ea insns: ra supplies the upper
 * 32 bits and rb the lower 32, masked down to the configured address
 * size.  With a 32-bit address size (or ra == r0) this degenerates to
 * just rb.  The caller owns (and must free) the returned temp.
 */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cpu->cfg.addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits. */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
739
/*
 * Stash the details needed to build ESR for a potential unaligned-access
 * fault (destination reg, store flag, word flag) into the iflags operand
 * of this insn's insn_start op, so no extra code runs on the fast path.
 */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
752
/*
 * Emit a load of size/sign @mop into rd; @rev implements the
 * reversed-endian variants.  Consumes (frees) @addr.
 */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* Enforce alignment only when unaligned accesses should trap. */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cpu->cfg.unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
785
/*
 * Load insns.  Plain / r (reversed) / i (immediate) variants use the
 * current MMU index; the "ea" (extended-address) variants are privileged
 * and always use the no-MMU index.
 */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
866
/* Load-and-reserve: record the reservation address/value for swx. */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
886
/*
 * Emit a store of size @mop from rd; @rev implements the reversed-endian
 * variants.  Consumes (frees) @addr.
 */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* Enforce alignment only when unaligned accesses should trap. */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cpu->cfg.unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
919
/*
 * Store insns, mirroring the load variants above: the "ea" forms are
 * privileged and use the no-MMU index.
 */
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
1000
/*
 * Store-conditional: succeeds (C = 0) only if the address matches the
 * lwx reservation and memory still holds the value observed by lwx
 * (checked atomically with a cmpxchg); otherwise C = 1 and nothing is
 * stored.  The reservation is always invalidated.
 */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1050
16bbbbc9
RH
1051static void setup_dslot(DisasContext *dc, bool type_b)
1052{
1053 dc->tb_flags_to_set |= D_FLAG;
1054 if (type_b && (dc->tb_flags & IMM_FLAG)) {
1055 dc->tb_flags_to_set |= BIMM_FLAG;
1056 }
1057}
1058
/*
 * Common helper for the unconditional branch family (br/bra/brd/...).
 * dest_rb > 0 selects a register destination (rb == r0 falls through
 * to the constant path with dest_imm == 0, which is equivalent);
 * abs selects an absolute target vs PC-relative; delay opens a delay
 * slot; link != 0 names the register receiving dc->base.pc_next.
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget. */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Register target: not statically known. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        /* Immediate target: known at translate time. */
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1084
/*
 * Expand one unconditional branch pair: NAME is the register-target
 * form, NAMEI the immediate form.  DELAY/ABS/LINK select delay slot,
 * absolute addressing, and link-register behavior (see do_branch).
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg) \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); } \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg) \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1097
fd779113
RH
/*
 * Common helper for conditional branches (beq/bge/bgt/ble/blt/bne).
 * The branch tests reg[ra] <cond> 0.  The taken target is computed
 * into cpu_btarget, then a movcond folds in the fallthrough address,
 * so cpu_btarget always holds the final destination after any delay
 * slot executes.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget. */
    if (dest_rb > 0) {
        /* Register target: PC-relative, not statically known. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget. */
    zero = tcg_const_i32(0);
    /* Fallthrough address: skip the branch and any delay slot insn. */
    next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(next);

    return true;
}
1132
/*
 * Expand the four variants of one conditional branch: register or
 * immediate destination, each with and without a delay slot.
 */
#define DO_BCC(NAME, COND) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); } \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); } \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); } \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1149
f5235314
RH
/*
 * brk: break, register form (privileged).
 * Jumps to rb, optionally links, sets MSR[BIP], and kills any lwx
 * reservation.
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        /* rd receives the address of this insn. */
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    /* cpu_pc was modified: end the TB and return to the main loop. */
    dc->base.is_jmp = DISAS_UPDATE;
    return true;
}
1165
/*
 * brki: break immediate.
 * Unprivileged only for the syscall (0x8) and debug (0x18) vectors;
 * any other target requires kernel mode.
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        /* rd receives the address of this insn. */
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Break kills any lwx reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    /* In user emulation the two permitted vectors map to exceptions. */
    switch (imm) {
    case 0x8: /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default: /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    /* The debug vector (0x18) does not set BIP. */
    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_UPDATE;
#endif

    return true;
}
1208
ee8c7f9f
RH
/*
 * mbar: memory barrier / sleep, behavior selected by immediate bits:
 * bit 1 clear => data barrier, bit 4 => sleep (privileged).
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        TCGv_i32 tmp_1;

        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        /*
         * cpu_env points at the env member inside MicroBlazeCPU, so
         * step back to the containing object to reach CPUState.halted.
         */
        tmp_1 = tcg_const_i32(1);
        tcg_gen_st_i32(tmp_1, cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));
        tcg_temp_free_i32(tmp_1);

        /* Resume at the insn after the mbar. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->cpustate_changed = 1;
    return true;
}
1254
e6cb0354
RH
/*
 * Common helper for rtsd/rtid/rtbd/rted.  All take a mandatory delay
 * slot; to_set carries the DRTI/DRTB/DRTE flag whose MSR side effects
 * are applied after the slot (0 for plain rtsd, which is therefore
 * unprivileged).  The target is ra + imm, never known statically.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1268
/* Expand one return-with-delay-slot insn; IFLAG selects the deferred
   MSR update applied by do_rti/do_rtb/do_rte (0 for rtsd). */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1277
20800179
RH
1278static bool trans_zero(DisasContext *dc, arg_zero *arg)
1279{
1280 /* If opcode_0_illegal, trap. */
1281 if (dc->cpu->cfg.opcode_0_illegal) {
1282 trap_illegal(dc, true);
1283 return true;
1284 }
1285 /*
1286 * Otherwise, this is "add r0, r0, r0".
1287 * Continue to trans_add so that MSR[C] gets cleared.
1288 */
1289 return false;
4acb54ba
EI
1290}
1291
/*
 * Read the full architectural MSR into d.  cpu_msr holds every bit
 * except carry, which lives separately in cpu_msr_c (as 0/1) and must
 * be replicated into both the C bit and its CC copy.
 */
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
    tcg_temp_free_i32(t);
}
1302
/*
 * Write v to the architectural MSR, splitting carry out into the
 * dedicated cpu_msr_c global.  Marks per-TB cpu state as changed so
 * the translator ends the TB.
 */
static void msr_write(DisasContext *dc, TCGv_i32 v)
{
    dc->cpustate_changed = 1;

    /* Install MSR_C. */
    tcg_gen_extract_i32(cpu_msr_c, v, 2, 1);

    /* Clear MSR_C and MSR_CC; MSR_PVR is not writable, and is always clear. */
    tcg_gen_andi_i32(cpu_msr, v, ~(MSR_C | MSR_CC | MSR_PVR));
}
1313
1314static void dec_msr(DisasContext *dc)
1315{
0063ebd6 1316 CPUState *cs = CPU(dc->cpu);
cfeea807 1317 TCGv_i32 t0, t1;
2023e9a3 1318 unsigned int sr, rn;
f0f7e7f7 1319 bool to, clrset, extended = false;
4acb54ba 1320
2023e9a3
EI
1321 sr = extract32(dc->imm, 0, 14);
1322 to = extract32(dc->imm, 14, 1);
1323 clrset = extract32(dc->imm, 15, 1) == 0;
4acb54ba 1324 dc->type_b = 1;
2023e9a3 1325 if (to) {
4acb54ba 1326 dc->cpustate_changed = 1;
f0f7e7f7
EI
1327 }
1328
1329 /* Extended MSRs are only available if addr_size > 32. */
1330 if (dc->cpu->cfg.addr_size > 32) {
1331 /* The E-bit is encoded differently for To/From MSR. */
1332 static const unsigned int e_bit[] = { 19, 24 };
1333
1334 extended = extract32(dc->imm, e_bit[to], 1);
2023e9a3 1335 }
4acb54ba
EI
1336
1337 /* msrclr and msrset. */
2023e9a3
EI
1338 if (clrset) {
1339 bool clr = extract32(dc->ir, 16, 1);
4acb54ba 1340
56837509 1341 if (!dc->cpu->cfg.use_msr_instr) {
1567a005
EI
1342 /* nop??? */
1343 return;
1344 }
1345
bdfc1e88 1346 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
1567a005
EI
1347 return;
1348 }
1349
4acb54ba
EI
1350 if (dc->rd)
1351 msr_read(dc, cpu_R[dc->rd]);
1352
cfeea807
EI
1353 t0 = tcg_temp_new_i32();
1354 t1 = tcg_temp_new_i32();
4acb54ba 1355 msr_read(dc, t0);
cfeea807 1356 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
4acb54ba
EI
1357
1358 if (clr) {
cfeea807
EI
1359 tcg_gen_not_i32(t1, t1);
1360 tcg_gen_and_i32(t0, t0, t1);
4acb54ba 1361 } else
cfeea807 1362 tcg_gen_or_i32(t0, t0, t1);
4acb54ba 1363 msr_write(dc, t0);
cfeea807
EI
1364 tcg_temp_free_i32(t0);
1365 tcg_temp_free_i32(t1);
d4705ae0
RH
1366 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
1367 dc->base.is_jmp = DISAS_UPDATE;
4acb54ba
EI
1368 return;
1369 }
1370
bdfc1e88
EI
1371 if (trap_userspace(dc, to)) {
1372 return;
1567a005
EI
1373 }
1374
4acb54ba
EI
1375#if !defined(CONFIG_USER_ONLY)
1376 /* Catch read/writes to the mmu block. */
1377 if ((sr & ~0xff) == 0x1000) {
f0f7e7f7 1378 TCGv_i32 tmp_ext = tcg_const_i32(extended);
05a9a651
EI
1379 TCGv_i32 tmp_sr;
1380
4acb54ba 1381 sr &= 7;
05a9a651 1382 tmp_sr = tcg_const_i32(sr);
05a9a651 1383 if (to) {
f0f7e7f7 1384 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
05a9a651 1385 } else {
f0f7e7f7 1386 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
05a9a651
EI
1387 }
1388 tcg_temp_free_i32(tmp_sr);
f0f7e7f7 1389 tcg_temp_free_i32(tmp_ext);
4acb54ba
EI
1390 return;
1391 }
1392#endif
1393
1394 if (to) {
4acb54ba 1395 switch (sr) {
aa28e6d4 1396 case SR_PC:
4acb54ba 1397 break;
aa28e6d4 1398 case SR_MSR:
4acb54ba
EI
1399 msr_write(dc, cpu_R[dc->ra]);
1400 break;
351527b7 1401 case SR_EAR:
dbdb77c4
RH
1402 {
1403 TCGv_i64 t64 = tcg_temp_new_i64();
1404 tcg_gen_extu_i32_i64(t64, cpu_R[dc->ra]);
1405 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUMBState, ear));
1406 tcg_temp_free_i64(t64);
1407 }
aa28e6d4 1408 break;
351527b7 1409 case SR_ESR:
41ba37c4
RH
1410 tcg_gen_st_i32(cpu_R[dc->ra],
1411 cpu_env, offsetof(CPUMBState, esr));
aa28e6d4 1412 break;
ab6dd380 1413 case SR_FSR:
86017ccf
RH
1414 tcg_gen_st_i32(cpu_R[dc->ra],
1415 cpu_env, offsetof(CPUMBState, fsr));
aa28e6d4
RH
1416 break;
1417 case SR_BTR:
ccf628b7
RH
1418 tcg_gen_st_i32(cpu_R[dc->ra],
1419 cpu_env, offsetof(CPUMBState, btr));
aa28e6d4
RH
1420 break;
1421 case SR_EDR:
39db007e
RH
1422 tcg_gen_st_i32(cpu_R[dc->ra],
1423 cpu_env, offsetof(CPUMBState, edr));
4acb54ba 1424 break;
5818dee5 1425 case 0x800:
cfeea807
EI
1426 tcg_gen_st_i32(cpu_R[dc->ra],
1427 cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
1428 break;
1429 case 0x802:
cfeea807
EI
1430 tcg_gen_st_i32(cpu_R[dc->ra],
1431 cpu_env, offsetof(CPUMBState, shr));
5818dee5 1432 break;
4acb54ba 1433 default:
0063ebd6 1434 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
4acb54ba
EI
1435 break;
1436 }
1437 } else {
4acb54ba 1438 switch (sr) {
aa28e6d4 1439 case SR_PC:
d4705ae0 1440 tcg_gen_movi_i32(cpu_R[dc->rd], dc->base.pc_next);
4acb54ba 1441 break;
aa28e6d4 1442 case SR_MSR:
4acb54ba
EI
1443 msr_read(dc, cpu_R[dc->rd]);
1444 break;
351527b7 1445 case SR_EAR:
dbdb77c4
RH
1446 {
1447 TCGv_i64 t64 = tcg_temp_new_i64();
1448 tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
1449 if (extended) {
1450 tcg_gen_extrh_i64_i32(cpu_R[dc->rd], t64);
1451 } else {
1452 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], t64);
1453 }
1454 tcg_temp_free_i64(t64);
a1b48e3a 1455 }
aa28e6d4 1456 break;
351527b7 1457 case SR_ESR:
41ba37c4
RH
1458 tcg_gen_ld_i32(cpu_R[dc->rd],
1459 cpu_env, offsetof(CPUMBState, esr));
aa28e6d4 1460 break;
351527b7 1461 case SR_FSR:
86017ccf
RH
1462 tcg_gen_ld_i32(cpu_R[dc->rd],
1463 cpu_env, offsetof(CPUMBState, fsr));
aa28e6d4 1464 break;
351527b7 1465 case SR_BTR:
ccf628b7
RH
1466 tcg_gen_ld_i32(cpu_R[dc->rd],
1467 cpu_env, offsetof(CPUMBState, btr));
aa28e6d4 1468 break;
7cdae31d 1469 case SR_EDR:
39db007e
RH
1470 tcg_gen_ld_i32(cpu_R[dc->rd],
1471 cpu_env, offsetof(CPUMBState, edr));
4acb54ba 1472 break;
5818dee5 1473 case 0x800:
cfeea807
EI
1474 tcg_gen_ld_i32(cpu_R[dc->rd],
1475 cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
1476 break;
1477 case 0x802:
cfeea807
EI
1478 tcg_gen_ld_i32(cpu_R[dc->rd],
1479 cpu_env, offsetof(CPUMBState, shr));
5818dee5 1480 break;
351527b7 1481 case 0x2000 ... 0x200c:
4acb54ba 1482 rn = sr & 0xf;
cfeea807 1483 tcg_gen_ld_i32(cpu_R[dc->rd],
68cee38a 1484 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
4acb54ba
EI
1485 break;
1486 default:
a47dddd7 1487 cpu_abort(cs, "unknown mfs reg %x\n", sr);
4acb54ba
EI
1488 break;
1489 }
1490 }
ee7dbcf8
EI
1491
1492 if (dc->rd == 0) {
cfeea807 1493 tcg_gen_movi_i32(cpu_R[0], 0);
ee7dbcf8 1494 }
4acb54ba
EI
1495}
1496
4acb54ba
EI
/*
 * MSR side effect of rtid, applied after the delay slot: set MSR[IE]
 * and restore UM/VM from their saved copies (the bits one position
 * above, i.e. UMS/VMS — see msr_write for the bit layout).
 */
static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_mov_i32(t1, cpu_msr);
    /* t0 = saved UMS/VMS shifted down into the UM/VM positions. */
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_ori_i32(t1, t1, MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
1514
/*
 * MSR side effect of rtbd, applied after the delay slot: clear
 * MSR[BIP] and restore UM/VM from the saved UMS/VMS copies.
 */
static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_mov_i32(t1, cpu_msr);
    tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
    /* t0 = saved UMS/VMS shifted down into the UM/VM positions. */
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}
1532
/*
 * MSR side effect of rted, applied after the delay slot: set MSR[EE],
 * clear MSR[EIP], and restore UM/VM from the saved UMS/VMS copies.
 */
static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_mov_i32(t1, cpu_msr);
    tcg_gen_ori_i32(t1, t1, MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    /* t0 = saved UMS/VMS shifted down into the UM/VM positions. */
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
1552
4acb54ba
EI
/*
 * Fallback for opcodes the legacy decoder does not know: raise an
 * illegal-opcode exception when configured to, otherwise log and
 * arrange for translation to abort at the next insn.
 */
static void dec_null(DisasContext *dc)
{
    if (trap_illegal(dc, true)) {
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  (uint32_t)dc->base.pc_next, dc->opcode);
    dc->abort_at_next_insn = 1;
}
1562
6d76d23e
EI
/* Insns connected to FSL or AXI stream attached devices. */
static void dec_stream(DisasContext *dc)
{
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    /* Stream insns are privileged. */
    if (trap_userspace(dc, true)) {
        return;
    }

    /* Port id: low 4 bits of imm (type B) or of rb (type A);
       the remaining imm bits carry control flags. */
    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    /* rd == 0 selects a put (write ra to the stream); otherwise get
       reads from the stream into rd. */
    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}
1592
4acb54ba
EI
/*
 * Dispatch table for the legacy (pre-decodetree) decoder.  The first
 * entry whose bits/mask match the major opcode wins; the {0, 0}
 * sentinel matches everything and routes to dec_null.
 */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1604
/*
 * Legacy decoder: crack the fixed instruction fields into dc and
 * dispatch through decinfo.  Used only for insns not yet converted
 * to decodetree.
 */
static void old_decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;

    /* bit 2 seems to indicate insn type. */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns. */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
1628
/* Translator hook: per-TB setup of the DisasContext. */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cpu = cpu;
    dc->tb_flags = dc->base.tb->flags;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    /* The imm extension value is carried in the TB's cs_base. */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    /* A TB starting inside a delay slot already has a branch pending. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Bound the insn count so the TB does not cross a page boundary. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
4acb54ba 1649
372122e3
RH
/* Translator hook: no per-TB start work is needed for MicroBlaze. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
4acb54ba 1653
372122e3
RH
/*
 * Translator hook: record pc and iflags (minus the MSR bits stored in
 * tb_flags) for each insn; consumed by restore_state_to_opc.
 */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    /* Remembered so the insn_start args can be amended later. */
    dc->insn_start = tcg_last_op();
}
4acb54ba 1661
372122e3
RH
/* Translator hook: emit a debug exception for a guest breakpoint. */
static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
                                   const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    gen_raise_exception_sync(dc, EXCP_DEBUG);

    /*
     * The address covered by the breakpoint must be included in
     * [tb->pc, tb->pc + tb->size) in order to for it to be
     * properly cleared -- thus we increment the PC here so that
     * the logic setting tb->size below does the right thing.
     */
    dc->base.pc_next += 4;
    return true;
}
4acb54ba 1678
372122e3
RH
/*
 * Translator hook: decode and emit one instruction, then update the
 * imm/delay-slot flag bookkeeping and resolve any pending branch.
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    /* Try the decodetree decoder first, then the legacy one. */
    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        old_decode(dc, ir);
    }

    /* Release the temp standing in for r0, if one was materialized. */
    if (dc->r0) {
        tcg_temp_free_i32(dc->r0);
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* Flags set by this insn take effect for the next one. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    /* A branch is pending and its delay slot (if any) is done. */
    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /* Apply deferred rtid/rtbd/rted MSR side effects first. */
        if (dc->tb_flags & DRTI_FLAG) {
            do_rti(dc);
        } else if (dc->tb_flags & DRTB_FLAG) {
            do_rtb(dc);
        } else if (dc->tb_flags & DRTE_FLAG) {
            do_rte(dc);
        }
        dc->base.is_jmp = DISAS_JUMP;
    }

    /* Force an exit if the per-tb cpu state has changed. */
    if (dc->base.is_jmp == DISAS_NEXT && dc->cpustate_changed) {
        dc->base.is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    }
}
1730
/*
 * Translator hook: emit the TB epilogue, choosing between chained
 * goto_tb exits, indirect exits, and debug exceptions depending on
 * how the TB ended.
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    assert(!dc->abort_at_next_insn);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    /* Flush any pending iflags changes to env before leaving. */
    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* Fell off the end of the TB: chain to the next insn. */
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_UPDATE:
        /* cpu_pc already holds the destination. */
        if (unlikely(cs->singlestep_enabled)) {
            gen_raise_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        return;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !cs->singlestep_enabled) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ singlestep) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);

        if (unlikely(cs->singlestep_enabled)) {
            gen_raise_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        return;

    default:
        g_assert_not_reached();
    }
}
4acb54ba 1798
372122e3
RH
/* Translator hook: log the guest disassembly of the translated TB. */
static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
{
    qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
    log_target_disas(cs, dcb->pc_first, dcb->tb->size);
}
1804
/* Hook table consumed by the generic translator_loop. */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start = mb_tr_tb_start,
    .insn_start = mb_tr_insn_start,
    .breakpoint_check = mb_tr_breakpoint_check,
    .translate_insn = mb_tr_translate_insn,
    .tb_stop = mb_tr_tb_stop,
    .disas_log = mb_tr_disas_log,
};
1814
/* Entry point from the generic TCG front end: translate one TB. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
}
1820
/* Dump the CPU state for 'info registers' and debug logging. */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode the per-insn translation flags symbolically. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x%016" PRIx64 " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    /* First 12 PVR registers, four per line. */
    for (i = 0; i < 12; i++) {
        qemu_fprintf(f, "rpvr%-2d=%08x%c",
                     i, env->pvr.regs[i], i % 4 == 3 ? '\n' : ' ');
    }

    /* General purpose registers, four per line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1875
cd0c24f9
AF
/* Allocate the TCG globals that mirror the CPUMBState fields. */
void mb_tcg_init(void)
{
/* R(X): general register rX; SP(X): special-purpose field X. */
#define R(X) { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
        R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target-width (TCGv), so it is created separately. */
    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1910
bad729e2
RH
/*
 * Restore env from the insn_start data recorded by mb_tr_insn_start:
 * data[0] is the pc, data[1] the iflags.
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->iflags = data[1];
}