]> git.proxmox.com Git - mirror_qemu.git/blame - target/microblaze/translate.c
target: Use vaddr in gen_intermediate_code
[mirror_qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
ee452036 10 * version 2.1 of the License, or (at your option) any later version.
4acb54ba
EI
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
dcb32f1d 25#include "tcg/tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
2ef6175a 27#include "exec/helper-gen.h"
77fc6f5e 28#include "exec/translator.h"
90c84c56 29#include "qemu/qemu-print.h"
4acb54ba 30
508127e2 31#include "exec/log.h"
a7e30d84 32
d53106c9
RH
33#define HELPER_H "helper.h"
34#include "exec/helper-info.c.inc"
35#undef HELPER_H
36
4acb54ba
EI
/*
 * Extract the bit field [start, end] (inclusive, LSB-first) from src.
 * All three arguments are fully parenthesized so that expression
 * arguments (e.g. "base + 1") expand correctly; the unparenthesized
 * form mis-computes the field width for such arguments.
 */
#define EXTRACT_FIELD(src, start, end) \
    (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
39
77fc6f5e
LV
40/* is_jmp field values */
41#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
17e77796 42#define DISAS_EXIT DISAS_TARGET_1 /* all cpu state modified dynamically */
77fc6f5e 43
f6278ca9
RH
44/* cpu state besides pc was modified dynamically; update pc to next */
45#define DISAS_EXIT_NEXT DISAS_TARGET_2
46/* cpu state besides pc was modified dynamically; update pc to btarget */
47#define DISAS_EXIT_JUMP DISAS_TARGET_3
48
cfeea807 49static TCGv_i32 cpu_R[32];
0f96e96b 50static TCGv_i32 cpu_pc;
3e0e16ae 51static TCGv_i32 cpu_msr;
1074c0fb 52static TCGv_i32 cpu_msr_c;
9b158558 53static TCGv_i32 cpu_imm;
b9c58aab 54static TCGv_i32 cpu_bvalue;
0f96e96b 55static TCGv_i32 cpu_btarget;
9b158558
RH
56static TCGv_i32 cpu_iflags;
57static TCGv cpu_res_addr;
58static TCGv_i32 cpu_res_val;
4acb54ba 59
4acb54ba
EI
60/* This is the state at translation time. */
61typedef struct DisasContext {
d4705ae0 62 DisasContextBase base;
4b893631 63 const MicroBlazeCPUConfig *cfg;
4acb54ba 64
683a247e
RH
65 /* TCG op of the current insn_start. */
66 TCGOp *insn_start;
67
20800179
RH
68 TCGv_i32 r0;
69 bool r0_set;
70
4acb54ba 71 /* Decoder. */
d7ecb757 72 uint32_t ext_imm;
683a247e 73 unsigned int tb_flags;
6f9642d7 74 unsigned int tb_flags_to_set;
287b1def 75 int mem_index;
4acb54ba 76
b9c58aab
RH
77 /* Condition under which to jump, including NEVER and ALWAYS. */
78 TCGCond jmp_cond;
79
80 /* Immediate branch-taken destination, or -1 for indirect. */
81 uint32_t jmp_dest;
4acb54ba
EI
82} DisasContext;
83
20800179
RH
84static int typeb_imm(DisasContext *dc, int x)
85{
86 if (dc->tb_flags & IMM_FLAG) {
87 return deposit32(dc->ext_imm, 0, 16, x);
88 }
89 return x;
90}
91
44d1432b
RH
92/* Include the auto-generated decoder. */
93#include "decode-insns.c.inc"
94
/*
 * Write the translation-time iflags back to the cpu_iflags global when
 * they differ from the flags this TB was compiled with.  Must be called
 * before raising exceptions or otherwise leaving the TB mid-stream.
 */
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}
102
/*
 * Emit a call to the raise-exception helper and terminate the TB.
 * Does NOT sync iflags or pc; use gen_raise_exception_sync for that.
 */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}
108
41ba37c4
RH
/*
 * Raise an exception with architectural state made consistent first:
 * iflags are synced and pc is set to the current insn's address.
 */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
115
/*
 * Raise a hardware exception with the given ESR error code:
 * store esr_ec into env->esr, then take EXCP_HW_EXCP synchronously.
 */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
123
4acb54ba
EI
/*
 * End the TB with a jump to dest.  Uses direct TB chaining
 * (goto_tb/exit_tb) when the translator allows it for this dest,
 * otherwise falls back to a TB lookup by the new pc.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
136
9ba8cd45
EI
/*
 * Returns true if the insn an illegal operation.
 * If exceptions are enabled, an exception is raised.
 * Note: returns cond regardless of whether the exception was raised,
 * so callers can skip the rest of translation for this insn.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
149
bdfc1e88
EI
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 * The privilege check uses mem_index: MMU_USER_IDX means user mode.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}
163
2a7567a2
RH
164/*
165 * Return true, and log an error, if the current insn is
166 * within a delay slot.
167 */
168static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
169{
170 if (dc->tb_flags & D_FLAG) {
171 qemu_log_mask(LOG_GUEST_ERROR,
172 "Invalid insn in delay slot: %s at %08x\n",
173 insn_type, (uint32_t)dc->base.pc_next);
174 return true;
175 }
176 return false;
177}
178
/*
 * Return the TCG value for reading register reg.  r0 always reads as
 * zero on MicroBlaze; it is materialized lazily into a per-insn temp
 * (dc->r0) and zeroed only once per insn (tracked by dc->r0_set).
 */
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}
4acb54ba 193
20800179
RH
/*
 * Return the TCG value for writing register reg.  Writes to r0 are
 * redirected to a scratch temp so they are discarded; note the temp
 * is NOT zeroed here (it only needs to absorb the write).
 */
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}
4acb54ba 204
20800179
RH
/*
 * Generic Type-A (register-register) codegen: rd = fn(ra, rb).
 * When rd is r0 and the operation has no side effects (e.g. carry
 * update), skip codegen entirely.
 */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}
220
39cf3864
RH
/*
 * Generic single-source Type-A codegen: rd = fn(ra).
 * Skips codegen when rd is r0 and the op has no side effects.
 */
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}
235
20800179
RH
/*
 * Generic Type-B codegen using an immediate-form TCG op:
 * rd = fni(ra, imm).  Skips codegen when rd is r0 with no side effects.
 */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}
250
/*
 * Generic Type-B codegen using a register-form TCG op, with the
 * immediate materialized into a constant: rd = fn(ra, imm).
 * Used when no immediate-form op exists (e.g. carry-producing adds).
 */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}
267
/*
 * Boilerplate generators for decodetree trans_* callbacks.
 * The *_CFG variants additionally gate the insn on a CPU configuration
 * field; when the feature is absent the callback returns false so the
 * decoder falls through to the illegal-opcode path.
 * SE is the "side effects" flag forwarded to do_type*().
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrap a helper that also takes the CPU env pointer. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }
d5aead3d 303
20800179
RH
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    /* add2 computes the 64-bit sum; the high word is the carry out. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}
311
/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Two-step carry chain: tmp = ina + C_in, then out = tmp + inb;
       each step folds its carry-out into cpu_msr_c. */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}
321
/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

/* add variants: SE=true marks ops that write the carry flag. */
DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
338
cb0a0a4c
RH
/* andni: and-not with immediate; implemented as AND with ~imm. */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
348
081d8e02
RH
/* Barrel shifters: only the low 5 bits of the shift amount are used. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}
369
/* Bit-field extract immediate: out = ina[imm_s +: imm_w]. */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
384
/* Bit-field insert immediate: out[imm_s..imm_w] = low bits of ina. */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        /* Deposit into out, preserving bits outside the field. */
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
411
39cf3864
RH
/* Count leading zeros; a zero input yields 32. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
418
58b48b63
RH
/* Signed compare: out = inb - ina, with bit 31 forced to (inb < ina). */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

/* Unsigned compare: as gen_cmp, but bit 31 = (inb <u ina). */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
a2b0b90e 439
d5aead3d
RH
/*
 * FPU operations are implemented via env helpers (they can raise FP
 * exceptions).  SE=true: helpers have side effects even when rd == r0.
 */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

/* flt/fint/fsqrt require the extended FPU (use_fpu >= 2). */
DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
471
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
/* idiv computes inb / ina (divisor is ra), hence the swap. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
485
e64b2e5c
RH
/*
 * IMM prefix insn: latch the upper 16 bits for the following Type-B
 * insn.  Sets IMM_FLAG for the next insn via tb_flags_to_set; also
 * mirrors the value into cpu_imm for exception restart.
 */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
496
97955ceb
RH
/* High word of the signed 32x32->64 product (low word discarded). */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

/* High word of the unsigned product. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

/* High word of the signed x unsigned product. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

/* High-word variants require the full multiplier (use_hw_mul >= 2). */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
520
cb0a0a4c
RH
DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

/* pcmpeq: out = (ina == inb) ? 1 : 0. */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

/* pcmpne: out = (ina != inb) ? 1 : 0. */
static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
537
a2b0b90e
RH
/*
 * Reverse-subtract family: out = inb - ina.  MicroBlaze's carry for
 * subtraction is a "not borrow": C = (inb >= ina) unsigned.
 */
/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* inb - ina - !C == inb + ~ina + C; chain via two add2 steps. */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)
581
39cf3864
RH
DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

/* Shift-right-by-one forms: bit 0 of the source becomes the carry. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* src: shift right with the old carry shifted into bit 31. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Save old carry before clobbering it with the new one. */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

/* swaph: swap the two 16-bit halves (rotate by 16). */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
627
d8e59c4a
RH
/*
 * Compute the effective address ra + rb for a Type-A load/store.
 * Returns a fresh temp.  If r1 (the stack pointer) is involved and
 * stack protection is configured, call the stackprot helper.
 */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}
650
/*
 * Compute the effective address ra + imm for a Type-B load/store.
 * Returns a fresh temp; applies the stackprot check when ra is r1.
 */
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}
669
#ifndef CONFIG_USER_ONLY
/*
 * Compute the extended (up to 64-bit physical) address for the *ea
 * insns: low 32 bits from rb, high bits from ra, truncated to the
 * configured address size.  System-mode only.
 */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits. */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
d8e59c4a 697
#ifndef CONFIG_USER_ONLY
/*
 * Record ESR "exception specific status" bits (dest reg, store flag,
 * word-size flag) into the iflags slot of the current insn_start op,
 * so the unaligned-access handler can reconstruct them at runtime.
 */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
#endif
ab0c8d0f 712
d8e59c4a
RH
/*
 * Common load codegen: load into rd from addr using memory op mop.
 * rev selects the reversed-endian (lbur/lhur/lwr) behavior.
 */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}
750
/*
 * Load insn trans_* callbacks.  Naming: l{b,h,w} = byte/half/word,
 * u = unsigned, r = reversed endianness, i = immediate offset,
 * ea = extended (physical) address, privileged and system-mode only.
 */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
843
/*
 * lwx: load-exclusive.  Records the address and loaded value in
 * cpu_res_addr/cpu_res_val for the matching swx to compare against.
 */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
862
/*
 * Common store codegen: store rd to addr using memory op mop.
 * Mirrors do_load, including the reversed-endian handling.
 */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}
900
/*
 * Store insn trans_* callbacks, parallel to the load group above:
 * s{b,h,w} = byte/half/word, r = reversed endianness, i = immediate
 * offset, ea = extended (physical) address, privileged.
 */
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
993
/*
 * swx: store-exclusive.  Succeeds (C=0) only if the address matches
 * the lwx reservation and the memory still holds the value loaded by
 * lwx (checked via atomic cmpxchg); otherwise C=1.  The reservation
 * is invalidated either way.
 */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1041
16bbbbc9
RH
1042static void setup_dslot(DisasContext *dc, bool type_b)
1043{
1044 dc->tb_flags_to_set |= D_FLAG;
1045 if (type_b && (dc->tb_flags & IMM_FLAG)) {
1046 dc->tb_flags_to_set |= BIMM_FLAG;
1047 }
1048}
1049
/*
 * Common unconditional-branch codegen.
 *
 * dest_rb >= 0 selects a register destination, otherwise dest_imm is
 * used.  abs selects absolute vs PC-relative; delay arms a delay slot;
 * link != 0 writes the return address into r[link].
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget. */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Register destination: not known at translate time. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        /* Immediate destination: known, enables direct goto_tb. */
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1078
/*
 * Expand the register-form (NAME) and immediate-form (NAMEI) trans
 * functions for each unconditional branch variant: plain, absolute (a),
 * delay-slot (d), and linking (l) combinations.
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg) \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); } \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg) \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1091
fd779113
RH
/*
 * Common conditional-branch codegen: branch if r[ra] <cond> 0.
 * The taken destination goes into cpu_btarget, then a movcond
 * replaces it with the fall-through address when the condition
 * does not hold, so btarget always ends up as the final target.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget. */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget. */
    zero = tcg_constant_i32(0);
    /* Fall-through address skips the delay slot when one is armed. */
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}
1127
/*
 * Expand the four forms of each conditional branch: register /
 * register+delay (d) / immediate (i) / immediate+delay (id).
 */
#define DO_BCC(NAME, COND) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); } \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); } \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); } \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1144
f5235314
RH
/*
 * Break (brk): privileged.  Jumps to r[rb], optionally linking the
 * return address into r[rd], sets MSR[BIP] and drops any lwx
 * reservation.  Ends the TB via DISAS_EXIT since MSR changed.
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
1164
/*
 * Break immediate (brki): jump to a fixed vector.  Userspace may only
 * use the syscall (0x8) and debug (0x18) vectors.  In user-only builds
 * these raise EXCP_SYSCALL/EXCP_DEBUG directly; in system builds MSR
 * is updated (BIP, and UMS/VMS saved copies of UM/VM) and the TB ends.
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Any break invalidates the lwx reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
1211
ee8c7f9f
RH
/*
 * Memory barrier (mbar).  imm bit 1 clear => emit a data barrier;
 * imm bit 4 set => privileged sleep (halt the CPU).  The TB is always
 * ended so self-modifying code and interrupt-state changes are seen.
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        /* Set cs->halted; offset computed relative to env. */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
1258
e6cb0354
RH
/*
 * Common codegen for the return family (rtsd/rtid/rtbd/rted).
 * to_set carries the DRTI/DRTB/DRTE flag to latch for the delay slot
 * (0 for plain rtsd, which is also the only unprivileged form).
 * Destination is r[ra] + imm; these insns always have a delay slot.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    /* Register-relative target: not known at translate time. */
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1276
/* Expand one trans function per return variant, tagged with its iflag. */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1285
20800179
RH
/*
 * Opcode 0: either an illegal-opcode trap (when the core is configured
 * with opcode_0_illegal) or the canonical nop encoding "add r0,r0,r0".
 */
static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap. */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}
1299
/*
 * Read the architectural MSR into d: cpu_msr holds all bits except
 * the carry, which lives separately in the cpu_msr_c boolean.
 */
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}
1309
536e340f
RH
/*
 * Common codegen for msrclr/msrset: optionally read the old MSR into
 * r[rd], then clear (set == false) or set (set == true) the bits in
 * arg->imm.  Userspace may only touch MSR_C.
 */
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        /* Other MSR bits changed: return to the main loop after this insn. */
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}
1346
/* msrclr: clear the MSR bits given in the immediate. */
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}
1351
/* msrset: set the MSR bits given in the immediate. */
static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
1356
/*
 * Move to special register (mts): privileged.  Writes r[ra] into the
 * SPR selected by arg->rs; MMU registers go through the mmu_write
 * helper.  Always ends the TB (DISAS_EXIT_NEXT) since cpu state that
 * affects translation may have changed.
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* trap_userspace() always fires for user-only. */
    g_assert_not_reached();
#else
    /* Only TLBLO (0x1003) has an extended form. */
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C. */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
1567a005 1415
9df297a2
RH
/*
 * Move from special register (mfs): read the SPR selected by arg->rs
 * into r[rd].  arg->e selects the extended (high-half / 64-bit) forms;
 * MMU registers go through the mmu_read helper; 0x2000.. reads the
 * PVR array straight out of the cpu config.
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                /* Extended EAR read: return the high 32 bits. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            /* Plain EAR read: return the low 32 bits. */
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        /* PVR registers live in cpu->cfg; offset computed from env. */
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
1505
/*
 * Return-from-interrupt MSR update: set MSR_IE and restore UM/VM
 * from their saved copies (UMS/VMS, one bit above, hence the shift).
 */
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1516
/*
 * Return-from-break MSR update: clear MSR_BIP and restore UM/VM
 * from their saved copies (UMS/VMS).
 */
static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1526
/*
 * Return-from-exception MSR update: set MSR_EE, clear MSR_EIP, and
 * restore UM/VM from their saved copies (UMS/VMS).
 */
static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1537
6d76d23e 1538/* Insns connected to FSL or AXI stream attached devices. */
/*
 * Common codegen for get/getd (FSL/AXI-stream receive): privileged.
 * The 4-bit stream id comes from r[rb] (getd) or the immediate (get);
 * the actual transfer is done by the "get" helper.
 */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}
1558
/* get: stream id taken from the immediate. */
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}
1563
/* getd: stream id taken dynamically from r[rb]. */
static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
1568
/*
 * Common codegen for put/putd (FSL/AXI-stream transmit): privileged.
 * Mirrors do_get: stream id from r[rb] or immediate, transfer via the
 * "put" helper with r[ra] as the payload.
 */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}
1588
/* put: stream id taken from the immediate. */
static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}
1593
/* putd: stream id taken dynamically from r[rb]. */
static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
1598
/*
 * TranslatorOps hook: initialize the per-TB DisasContext from the TB
 * flags/cs_base and cap max_insns so the TB never crosses a page.
 */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    /* The extended-immediate prefix value is carried in cs_base. */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    /* A TB starting inside a delay slot must complete the branch. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Number of 4-byte insns remaining on this page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
4acb54ba 1617
372122e3
RH
/* TranslatorOps hook: nothing to do at TB start for MicroBlaze. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
4acb54ba 1621
372122e3
RH
/*
 * TranslatorOps hook: record pc and the non-MSR tb_flags for this insn
 * so exception unwinding can restore them; remember the op for later
 * back-patching via dc->insn_start.
 */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}
4acb54ba 1629
372122e3
RH
/*
 * TranslatorOps hook: decode and translate one instruction, maintain
 * the imm-prefix and delay-slot flag state, and when this insn was a
 * delay slot, complete the pending branch (including any rti/rtb/rte
 * MSR restore) by choosing the final is_jmp disposition.
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cpu_env(cs);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Drop any temp created for an r0 read/write by this insn. */
    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* One-shot flags expire now; flags requested by this insn apply next. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
1707
/*
 * TranslatorOps hook: emit the TB epilogue according to the final
 * is_jmp disposition — chain to the next TB, take a (possibly
 * conditional) direct jump, do an indirect jump via btarget, or
 * return to the main loop (DISAS_EXIT_*).
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    /* Write back any live iflags before leaving the TB. */
    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        /* pc already updated by the insn (e.g. brk). */
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
4acb54ba 1777
8eb806a7
RH
/* TranslatorOps hook: log the guest disassembly of this TB. */
static void mb_tr_disas_log(const DisasContextBase *dcb,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
    target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
}
1784
/* Hook table consumed by the generic translator loop. */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};
1793
/* Entry point: translate one TB by driving the generic translator loop. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}
1800
/*
 * Dump the architectural CPU state (pc, MSR, iflags, exception
 * registers and the 32 GPRs) to f, for "info registers" and logging.
 */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode the per-insn translation flags symbolically. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    /* GPRs, four per line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1850
cd0c24f9
AF
/*
 * One-time TCG setup: create the global TCG variables that mirror
 * fields of CPUMBState (GPRs r1-r31 and the special registers).
 */
void mb_tcg_init(void)
{
#define R(X) { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong sized, hence the separate creation. */
    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}