]> git.proxmox.com Git - mirror_qemu.git/blame - target/microblaze/translate.c
target/microblaze: Add flags markup to some helpers
[mirror_qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
dcb32f1d 25#include "tcg/tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
f08b6170 27#include "exec/cpu_ldst.h"
2ef6175a 28#include "exec/helper-gen.h"
77fc6f5e 29#include "exec/translator.h"
90c84c56 30#include "qemu/qemu-print.h"
4acb54ba 31
a7e30d84 32#include "trace-tcg.h"
508127e2 33#include "exec/log.h"
a7e30d84 34
4acb54ba
EI
/*
 * Extract the bit field [end:start] (inclusive) from src.
 * All arguments are parenthesized so that expression arguments
 * (e.g. "base + ofs") expand correctly; the original form broke
 * for non-trivial 'start'/'end' expressions.
 */
#define EXTRACT_FIELD(src, start, end) \
    (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
37
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */

static TCGv_i32 cpu_R[32];      /* general-purpose registers */
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;      /* MSR[C] (carry), tracked separately */
static TCGv_i32 cpu_imm;        /* high half latched by the IMM insn */
static TCGv_i32 cpu_bvalue;     /* condition value cached across a delay slot */
static TCGv_i32 cpu_btarget;    /* branch-taken destination */
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;       /* lwx/swx reservation address */
static TCGv_i32 cpu_res_val;    /* lwx/swx reservation value */
4acb54ba 52
022c62cb 53#include "exec/gen-icount.h"
4acb54ba
EI
54
/* This is the state at translation time. */
typedef struct DisasContext {
    DisasContextBase base;
    MicroBlazeCPU *cpu;

    /* TCG op of the current insn_start. */
    TCGOp *insn_start;

    /* Lazily-allocated scratch temp standing in for r0;
       r0_set is true once it is known to contain zero. */
    TCGv_i32 r0;
    bool r0_set;

    /* Decoder. */
    uint32_t ext_imm;               /* high 16 bits from a preceding IMM insn */
    unsigned int cpustate_changed;
    unsigned int tb_flags;          /* iflags for the insn being translated */
    unsigned int tb_flags_to_set;   /* iflags to apply after this insn */
    int mem_index;                  /* MMU index for data accesses */

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;
79
20800179
RH
80static int typeb_imm(DisasContext *dc, int x)
81{
82 if (dc->tb_flags & IMM_FLAG) {
83 return deposit32(dc->ext_imm, 0, 16, x);
84 }
85 return x;
86}
87
44d1432b
RH
88/* Include the auto-generated decoder. */
89#include "decode-insns.c.inc"
90
/* Synch the tb dependent flags between translator and runtime. */
static void t_sync_flags(DisasContext *dc)
{
    /* Only write iflags back if they differ from what the TB started
       with; the MSR_TB_MASK bits are maintained elsewhere. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & ~MSR_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & ~MSR_TB_MASK);
    }
}
98
/* Raise exception INDEX; the TB ends here (DISAS_NORETURN). */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->base.is_jmp = DISAS_NORETURN;
}
107
/* As gen_raise_exception, but first sync iflags and pc to the CPU state. */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
114
/* Raise a hardware exception with error code ESR_EC stored into env->esr. */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
123
/* Direct TB chaining is only safe within the same guest page
   (system mode); user mode has no paging concerns. */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
132
/* End the TB with a jump to DEST, chaining to TB slot N when possible. */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (dc->base.singlestep_enabled) {
        /* Single-stepping: deliver EXCP_DEBUG at the destination. */
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
        tcg_gen_movi_i32(cpu_pc, dest);
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else if (use_goto_tb(dc, dest)) {
        /* Chain directly to the next TB. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        /* Cross-page or otherwise unchainable: plain exit. */
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
150
/*
 * Returns true if the insn an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    /* The exception is only delivered when MSR[EE] is set and the core
       is configured to raise illegal-opcode exceptions. */
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cpu->cfg.illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
163
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}
177
20800179 178static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
4acb54ba 179{
20800179
RH
180 if (likely(reg != 0)) {
181 return cpu_R[reg];
182 }
183 if (!dc->r0_set) {
184 if (dc->r0 == NULL) {
185 dc->r0 = tcg_temp_new_i32();
186 }
187 tcg_gen_movi_i32(dc->r0, 0);
188 dc->r0_set = true;
189 }
190 return dc->r0;
191}
4acb54ba 192
20800179
RH
193static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
194{
195 if (likely(reg != 0)) {
196 return cpu_R[reg];
197 }
198 if (dc->r0 == NULL) {
199 dc->r0 = tcg_temp_new_i32();
200 }
201 return dc->r0;
202}
4acb54ba 203
20800179
RH
204static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
205 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
206{
207 TCGv_i32 rd, ra, rb;
40cbf5b7 208
20800179
RH
209 if (arg->rd == 0 && !side_effects) {
210 return true;
40cbf5b7
EI
211 }
212
20800179
RH
213 rd = reg_for_write(dc, arg->rd);
214 ra = reg_for_read(dc, arg->ra);
215 rb = reg_for_read(dc, arg->rb);
216 fn(rd, ra, rb);
217 return true;
218}
219
39cf3864
RH
220static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
221 void (*fn)(TCGv_i32, TCGv_i32))
222{
223 TCGv_i32 rd, ra;
224
225 if (arg->rd == 0 && !side_effects) {
226 return true;
227 }
228
229 rd = reg_for_write(dc, arg->rd);
230 ra = reg_for_read(dc, arg->ra);
231 fn(rd, ra);
232 return true;
233}
234
20800179
RH
235static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
236 void (*fni)(TCGv_i32, TCGv_i32, int32_t))
237{
238 TCGv_i32 rd, ra;
239
240 if (arg->rd == 0 && !side_effects) {
241 return true;
40cbf5b7
EI
242 }
243
20800179
RH
244 rd = reg_for_write(dc, arg->rd);
245 ra = reg_for_read(dc, arg->ra);
246 fni(rd, ra, arg->imm);
247 return true;
248}
249
250static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
251 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
252{
253 TCGv_i32 rd, ra, imm;
254
255 if (arg->rd == 0 && !side_effects) {
256 return true;
4acb54ba 257 }
20800179
RH
258
259 rd = reg_for_write(dc, arg->rd);
260 ra = reg_for_read(dc, arg->ra);
261 imm = tcg_const_i32(arg->imm);
262
263 fn(rd, ra, imm);
264
265 tcg_temp_free_i32(imm);
266 return true;
267}
268
/* Instantiate a trans_* handler for a type-A (three-register) insn. */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

/* As DO_TYPEA, but the insn is only valid when cfg.CFG is enabled. */
#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cpu->cfg.CFG && do_typea(dc, a, SE, FN); }

/* Instantiate a trans_* handler for a two-operand insn. */
#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

/* As DO_TYPEA0, gated on a configuration bit. */
#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cpu->cfg.CFG && do_typea0(dc, a, SE, FN); }

/* Instantiate a trans_* handler for a type-B insn (immediate passed
   directly to the generator). */
#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

/* As DO_TYPEBI, gated on a configuration bit. */
#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cpu->cfg.CFG && do_typeb_imm(dc, a, SE, FNI); }

/* As DO_TYPEBI, but the immediate is materialized as a TCG value. */
#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrap a helper that takes cpu_env as an extra argument. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }
304
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    /* The high word of the double-width sum is the new MSR[C]. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Fold in the incoming carry first, then add inb, accumulating
       the carry-out of both steps into MSR[C]. */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
344
/* andni: and with the complement of the immediate. */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
354
/* Barrel shifter: arithmetic right by rb & 31. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

/* Barrel shifter: logical right by rb & 31. */
static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

/* Barrel shifter: logical left by rb & 31. */
static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

/* Bit-field extract: out = ina[imm_s + imm_w - 1 : imm_s]. */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

/* Bit-field insert: out[imm_w : imm_s] = low bits of ina. */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
420
/* Count leading zeros; all-zero input yields 32. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
427
/* Signed compare: out = inb - ina, with bit 31 set iff inb < ina (signed). */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

/* Unsigned compare: as gen_cmp but bit 31 reflects unsigned ordering. */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
a2b0b90e 450
/* FPU operations are implemented as cpu_env-taking helpers. */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

/* Conversions and sqrt require the extended (use_fpu >= 2) FPU. */
DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
482
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* idiv computes rb / ra, hence the swapped operands. */
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
496
/* IMM latches the high 16 bits for the immediate of the following insn. */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    /* IMM_FLAG takes effect for the next insn only. */
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
504
/* Signed high-part multiply; the low half is discarded. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* Unsigned high-part multiply. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* Signed * unsigned high-part multiply. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
531
DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)
534
/* pcmpeq: out = (ina == inb) ? 1 : 0. */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

/* pcmpne: out = (ina != inb) ? 1 : 0. */
static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
548
/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* For a - b, the borrow-free condition is b <= a, i.e. C = (inb >= ina). */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* inb - ina - !C computed as inb + ~ina + C, propagating carry. */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)
597
DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

/* Arithmetic shift right by one; the bit shifted out goes to MSR[C]. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* Shift right by one through carry: old MSR[C] enters bit 31,
   the bit shifted out becomes the new MSR[C]. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}

/* Logical shift right by one; the bit shifted out goes to MSR[C]. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

/* Swap the two 16-bit halves. */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
645
/* Compute the data address for a type-A load/store: ra + rb,
   widened to target_ulong. Returns a new temp owned by the caller. */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    /* Accesses relative to r1 are subject to the stack-protection check. */
    if ((ra == 1 || rb == 1) && dc->cpu->cfg.stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
669
/* Compute the data address for a type-B load/store: ra + imm,
   widened to target_ulong. Returns a new temp owned by the caller. */
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    /* Accesses relative to r1 are subject to the stack-protection check. */
    if (ra == 1 && dc->cpu->cfg.stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
689
/* Compute the extended (up to 64-bit) address for the *ea insns:
   ra supplies the high word, rb the low word. */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cpu->cfg.addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        /* No high word: plain 32-bit address from rb (or 0). */
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits. */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
715
/* Record, in the insn_start's iflags parameter, the ESR[ESS] details
   (destination reg, store flag, word-size flag) that the unaligned
   access exception path needs. */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
728
/* Common load emitter; consumes (frees) ADDR. */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* When unaligned exceptions are enabled, enforce alignment and
       stash the ESS details for the exception path. */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cpu->cfg.unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
761
/* Loads: plain, reversed (..r), extended-address (..ea, privileged,
   no-MMU index) and immediate-offset (..i) variants. */

static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
842
/* Load-exclusive: load the word and record the reservation. */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
862
/* Common store emitter; consumes (frees) ADDR. */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* When unaligned exceptions are enabled, enforce alignment and
       stash the ESS details for the exception path. */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cpu->cfg.unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
895
/* Stores: plain, reversed (..r), extended-address (..ea, privileged,
   no-MMU index) and immediate-offset (..i) variants. */

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
976
/* Store-exclusive: succeeds (MSR[C]=0) only if the reservation from
   lwx still matches both address and memory contents. */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1026
/* Arrange for the next insn to execute as a delay slot; a type-B
   branch additionally preserves any pending IMM prefix (BIMM_FLAG). */
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}
1034
/*
 * Common unconditional-branch generator.
 *
 * dest_rb  >= 0: branch target comes from register rb (type A form);
 *          <  0: target is dest_imm (type B form).
 * delay:   insn has a delay slot.
 * abs:     target is absolute rather than PC-relative.
 * link:    register number to receive the return address, or 0 for none.
 *
 * The taken destination is staged in cpu_btarget; dc->jmp_dest caches a
 * compile-time-known destination (-1 when only known at runtime).
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget. */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        /* rb == 0 reads as zero, so it folds into the immediate path. */
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1060
/*
 * Expand the register (type A) and immediate (type B) trans functions
 * for each unconditional branch variant: (DELAY, ABS, LINK) select
 * delay slot, absolute addressing, and link-register behavior.
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1073
fd779113
RH
/*
 * Common conditional-branch generator (beq/bne/bge/... families).
 *
 * dest_rb >= 0 selects a register target, < 0 the immediate dest_imm
 * (always PC-relative).  The condition compares register ra against 0
 * with 'cond'.  The final destination -- taken target or fall-through --
 * is resolved into cpu_btarget with a movcond, so the delay slot (if any)
 * executes before the branch is committed in tb_stop.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget. */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget. */
    zero = tcg_const_i32(0);
    /* Fall-through PC: skip the delay slot too when one is present. */
    next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(next);

    return true;
}
1108
/*
 * Expand the four encodings of each conditional branch:
 * register target, register target w/ delay slot, immediate target,
 * and immediate target w/ delay slot.
 */
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1125
f5235314
RH
/*
 * brk: privileged break to the address in rb.  Sets MSR[BIP], optionally
 * links the return address into rd, and kills any lwx reservation.
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    /* MSR and PC changed: end the TB and return to the main loop. */
    dc->base.is_jmp = DISAS_UPDATE;
    return true;
}
1141
/*
 * brki: break to immediate vector.  Vectors 0x8 (syscall) and 0x18
 * (debug/hw exception entry) are allowed from user mode; anything else
 * is privileged.  For user-only emulation the two permitted vectors are
 * turned directly into EXCP_SYSCALL / EXCP_DEBUG.
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Any break invalidates an outstanding lwx reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        /* All breaks except the debug vector set break-in-progress. */
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_UPDATE;
#endif

    return true;
}
1184
ee8c7f9f
RH
/*
 * mbar: memory barrier / sleep instruction.  The imm field selects
 * data barrier (bit 1 clear), instruction barrier (bit 0 clear), and
 * sleep (bit 4).  Sleep is privileged and halts the CPU via EXCP_HLT.
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        TCGv_i32 tmp_1;

        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        /*
         * Set cs->halted = 1.  cpu_env points at env inside the CPU
         * object, hence the negative offset back to the CPUState base.
         */
        tmp_1 = tcg_const_i32(1);
        tcg_gen_st_i32(tmp_1, cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));
        tcg_temp_free_i32(tmp_1);

        /* Resume after the mbar once woken. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->cpustate_changed = 1;
    return true;
}
1230
e6cb0354
RH
/*
 * Common generator for the return-from-subroutine family (rtsd/rtid/
 * rtbd/rted).  'to_set' is the DRTx flag identifying which MSR restore
 * (if any) must happen after the delay slot; nonzero variants are
 * privileged.  The target, ra + imm, goes into cpu_btarget.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    dc->tb_flags_to_set |= to_set;
    /* All of these returns have a mandatory delay slot. */
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1244
/* Expand one trans function per return variant; IFLAG is the DRTx flag. */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1253
20800179
RH
/*
 * Decode hook for an all-zero opcode word.  Depending on CPU config
 * this is either an illegal opcode or "add r0, r0, r0" (a nop that
 * still clears MSR[C]).
 */
static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap. */
    if (dc->cpu->cfg.opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}
1267
/*
 * Materialize the architectural MSR into 'd': the carry bit is kept
 * separately as the boolean cpu_msr_c, so fold it back into both the
 * MSR_C and MSR_CC (carry copy) positions.
 */
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
    tcg_temp_free_i32(t);
}
1278
#ifndef CONFIG_USER_ONLY
/*
 * Write 'v' to the architectural MSR, splitting the carry bit out into
 * cpu_msr_c.  Forces a TB exit since MSR affects translation state.
 */
static void msr_write(DisasContext *dc, TCGv_i32 v)
{
    dc->cpustate_changed = 1;

    /* Install MSR_C. */
    tcg_gen_extract_i32(cpu_msr_c, v, 2, 1);

    /* Clear MSR_C and MSR_CC; MSR_PVR is not writable, and is always clear. */
    tcg_gen_andi_i32(cpu_msr, v, ~(MSR_C | MSR_CC | MSR_PVR));
}
#endif
4acb54ba 1291
536e340f
RH
/*
 * Common generator for msrclr/msrset.  The old MSR value is read into
 * rd (if nonzero), then the bits in imm are cleared or set ('set').
 * Userspace may only touch MSR_C; any other bit traps.
 */
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        /* Non-carry MSR bits can affect translation: end the TB. */
        dc->cpustate_changed = 1;
    }
    return true;
}
1328
/* msrclr: clear the MSR bits given in the immediate. */
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}
1333
/* msrset: set the MSR bits given in the immediate. */
static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
1338
/*
 * mts: move to special-purpose register.  Always privileged.  MMU
 * registers (0x1000..0x1005) go through the mmu_write helper; the
 * extended form (arg->e) is only valid for TLBLO.
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* trap_userspace() always traps for user-only emulation. */
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        msr_write(dc, src);
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    /* Special-register writes can affect translation: end the TB. */
    dc->cpustate_changed = 1;
    return true;
#endif
}
1567a005 1393
9df297a2
RH
/*
 * mfs: move from special-purpose register into rd.  The extended form
 * (arg->e) selects the high half of 64-bit registers (EAR) or the
 * extended view of TLBLO/PVR; otherwise the plain 32-bit register.
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                /* Extended read of EAR returns its high 32 bits. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
                tcg_temp_free_i64(t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        /* PC is known at translation time. */
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            /* Plain read of EAR returns its low 32 bits. */
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
            tcg_temp_free_i64(t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        /* Processor Version Registers. */
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(CPUMBState, pvr.regs[arg->rs - 0x2000]));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
1486
/*
 * Complete an rtid after its delay slot: re-enable interrupts and
 * restore MSR[VM]/MSR[UM] from their saved copies (VMS/UMS, one bit up).
 */
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Shift VMS/UMS down into the VM/UM positions. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
    dc->tb_flags &= ~DRTI_FLAG;
}
1500
/*
 * Complete an rtbd after its delay slot: clear break-in-progress and
 * restore MSR[VM]/MSR[UM] from their saved copies.
 */
static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Shift VMS/UMS down into the VM/UM positions. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
    dc->tb_flags &= ~DRTB_FLAG;
}
1513
/*
 * Complete an rted after its delay slot: re-enable exceptions, clear
 * exception-in-progress, and restore MSR[VM]/MSR[UM] from their copies.
 */
static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Shift VMS/UMS down into the VM/UM positions. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
    dc->tb_flags &= ~DRTE_FLAG;
}
1527
/* Insns connected to FSL or AXI stream attached devices. */

/*
 * Common generator for get/getd: read a word from stream interface
 * 'imm' (or the low 4 bits of rb for the dynamic form) into rd.
 * Privileged; 'ctrl' carries the instruction's control-flag bits.
 */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        /* Dynamic form: stream id comes from a register. */
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}

/* get: static stream id from the immediate field. */
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

/* getd: dynamic stream id from register rb. */
static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
1560
/*
 * Common generator for put/putd: write register ra to stream interface
 * 'imm' (or the low 4 bits of rb for the dynamic form).  Privileged;
 * 'ctrl' carries the instruction's control-flag bits.
 */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        /* Dynamic form: stream id comes from a register. */
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}

/* put: static stream id from the immediate field. */
static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

/* putd: dynamic stream id from register rb. */
static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
1592
/*
 * TranslatorOps hook: initialize per-TB translation state from the
 * TB's flags and cs_base (which carries any pending imm prefix value).
 */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cpu = cpu;
    dc->tb_flags = dc->base.tb->flags;
    dc->cpustate_changed = 0;
    /* cs_base holds the extended immediate from a preceding imm insn. */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    /* Resuming mid delay-slot: the branch is already committed. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Do not cross a page boundary: bound insns to the end of the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
4acb54ba 1612
372122e3
RH
/* TranslatorOps hook: nothing to do at TB start for MicroBlaze. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
4acb54ba 1616
372122e3
RH
/*
 * TranslatorOps hook: record pc and iflags for this insn so that
 * restore_state_to_opc() can rebuild them on exception unwinding.
 */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    /* MSR bits stored in tb_flags are not part of per-insn state. */
    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}
4acb54ba 1624
372122e3
RH
/*
 * TranslatorOps hook: a guest breakpoint at the current pc raises
 * EXCP_DEBUG and ends the TB.
 */
static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
                                   const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    gen_raise_exception_sync(dc, EXCP_DEBUG);

    /*
     * The address covered by the breakpoint must be included in
     * [tb->pc, tb->pc + tb->size) in order to for it to be
     * properly cleared -- thus we increment the PC here so that
     * the logic setting tb->size below does the right thing.
     */
    dc->base.pc_next += 4;
    return true;
}
4acb54ba 1641
372122e3
RH
/*
 * TranslatorOps hook: fetch and translate one instruction, then manage
 * the flag handoff between an insn and its successor (imm prefixes,
 * delay slots, pending MSR restores).
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Release the temp that stood in for r0, if one was created. */
    if (dc->r0) {
        tcg_temp_free_i32(dc->r0);
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* One-shot flags expire now; flags this insn set take effect next. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    /* A branch is pending and its delay slot (if any) has completed. */
    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /* Perform any deferred MSR restore for rtid/rtbd/rted. */
        if (dc->tb_flags & DRTI_FLAG) {
            do_rti(dc);
        } else if (dc->tb_flags & DRTB_FLAG) {
            do_rtb(dc);
        } else if (dc->tb_flags & DRTE_FLAG) {
            do_rte(dc);
        }
        dc->base.is_jmp = DISAS_JUMP;
    }

    /* Force an exit if the per-tb cpu state has changed. */
    if (dc->base.is_jmp == DISAS_NEXT && dc->cpustate_changed) {
        dc->base.is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    }
}
1693
/*
 * TranslatorOps hook: emit the TB epilogue according to how translation
 * ended -- chained goto_tb for fall-through and direct jumps, or an
 * exit to the main loop for indirect jumps and state changes.
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    /* Write back any lazily-tracked iflags before leaving the TB. */
    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_UPDATE:
        if (unlikely(cs->singlestep_enabled)) {
            gen_raise_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        return;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !cs->singlestep_enabled) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ singlestep) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);

        if (unlikely(cs->singlestep_enabled)) {
            gen_raise_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        return;

    default:
        g_assert_not_reached();
    }
}
4acb54ba 1759
372122e3
RH
/* TranslatorOps hook: disassemble the translated guest code for -d in_asm. */
static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
{
    qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
    log_target_disas(cs, dcb->pc_first, dcb->tb->size);
}
1765
/* Hook table consumed by the generic translator_loop(). */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .breakpoint_check   = mb_tr_breakpoint_check,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};
1775
/* Entry point: translate one TB via the generic translator loop. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
}
1781
/*
 * Dump the architectural CPU state (pc, MSR, iflags, special registers,
 * PVR regs, GPRs) to 'f' for the monitor / -d cpu logging.
 */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode the translation-state flags symbolically. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x%016" PRIx64 " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    /* Processor Version Registers, four per line. */
    for (i = 0; i < 12; i++) {
        qemu_fprintf(f, "rpvr%-2d=%08x%c",
                     i, env->pvr.regs[i], i % 4 == 3 ? '\n' : ' ');
    }

    /* General-purpose registers, four per line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1836
cd0c24f9
AF
/*
 * Create the TCG globals backing the guest-visible CPU state:
 * the GPR array, the special registers, and the lwx/swx reservation.
 */
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong sized, hence the non-i32 allocator. */
    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1877
bad729e2
RH
/*
 * Rebuild env state from the values recorded by mb_tr_insn_start()
 * when unwinding to a mid-TB instruction after an exception.
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->iflags = data[1];
}