]> git.proxmox.com Git - mirror_qemu.git/blame - target/microblaze/translate.c
target/microblaze: Fix no-op mb_cpu_transaction_failed
[mirror_qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
dcb32f1d 25#include "tcg/tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
4acb54ba 27#include "microblaze-decode.h"
f08b6170 28#include "exec/cpu_ldst.h"
2ef6175a 29#include "exec/helper-gen.h"
77fc6f5e 30#include "exec/translator.h"
90c84c56 31#include "qemu/qemu-print.h"
4acb54ba 32
a7e30d84 33#include "trace-tcg.h"
508127e2 34#include "exec/log.h"
a7e30d84 35
4acb54ba
EI
36#define EXTRACT_FIELD(src, start, end) \
37 (((src) >> start) & ((1 << (end - start + 1)) - 1))
38
77fc6f5e
LV
39/* is_jmp field values */
40#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
41#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
77fc6f5e 42
cfeea807 43static TCGv_i32 cpu_R[32];
0f96e96b 44static TCGv_i32 cpu_pc;
3e0e16ae 45static TCGv_i32 cpu_msr;
1074c0fb 46static TCGv_i32 cpu_msr_c;
9b158558
RH
47static TCGv_i32 cpu_imm;
48static TCGv_i32 cpu_btaken;
0f96e96b 49static TCGv_i32 cpu_btarget;
9b158558
RH
50static TCGv_i32 cpu_iflags;
51static TCGv cpu_res_addr;
52static TCGv_i32 cpu_res_val;
4acb54ba 53
022c62cb 54#include "exec/gen-icount.h"
4acb54ba
EI
55
/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    MicroBlazeCPU *cpu;

    /*
     * Scratch temp standing in for r0.  Lazily allocated; r0_set tracks
     * whether it currently holds zero (see reg_for_read/reg_for_write).
     */
    TCGv_i32 r0;
    bool r0_set;

    /* Decoder. */
    int type_b;             /* non-zero for type-B (immediate) insns */
    uint32_t ir;            /* raw instruction word */
    uint32_t ext_imm;       /* high 16 bits supplied by a preceding IMM */
    uint8_t opcode;
    uint8_t rd, ra, rb;     /* register fields */
    uint16_t imm;           /* 16-bit immediate field */

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm; /* clear IMM_FLAG at end of this insn */
    int mem_index;          /* MMU index for data accesses */

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;       /* one of the JMP_* states above */
    uint32_t jmp_pc;        /* branch target for direct jumps */

    int abort_at_next_insn;
} DisasContext;
87
20800179
RH
88static int typeb_imm(DisasContext *dc, int x)
89{
90 if (dc->tb_flags & IMM_FLAG) {
91 return deposit32(dc->ext_imm, 0, 16, x);
92 }
93 return x;
94}
95
44d1432b
RH
96/* Include the auto-generated decoder. */
97#include "decode-insns.c.inc"
98
4acb54ba
EI
99static inline void t_sync_flags(DisasContext *dc)
100{
4abf79a4 101 /* Synch the tb dependent flags between translator and runtime. */
4acb54ba 102 if (dc->tb_flags != dc->synced_flags) {
9b158558 103 tcg_gen_movi_i32(cpu_iflags, dc->tb_flags);
4acb54ba
EI
104 dc->synced_flags = dc->tb_flags;
105 }
106}
107
d8e59c4a
RH
108static inline void sync_jmpstate(DisasContext *dc)
109{
110 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
111 if (dc->jmp == JMP_DIRECT) {
112 tcg_gen_movi_i32(cpu_btaken, 1);
113 }
114 dc->jmp = JMP_INDIRECT;
115 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
116 }
117}
118
41ba37c4 119static void gen_raise_exception(DisasContext *dc, uint32_t index)
4acb54ba
EI
120{
121 TCGv_i32 tmp = tcg_const_i32(index);
122
64254eba 123 gen_helper_raise_exception(cpu_env, tmp);
4acb54ba 124 tcg_temp_free_i32(tmp);
d4705ae0 125 dc->base.is_jmp = DISAS_NORETURN;
4acb54ba
EI
126}
127
41ba37c4
RH
/*
 * As gen_raise_exception, but first make iflags and the current pc
 * architecturally visible so the exception sees a consistent state.
 */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
134
135static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
136{
137 TCGv_i32 tmp = tcg_const_i32(esr_ec);
138 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
139 tcg_temp_free_i32(tmp);
140
141 gen_raise_exception_sync(dc, EXCP_HW_EXCP);
142}
143
90aa39a1
SF
144static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
145{
146#ifndef CONFIG_USER_ONLY
d4705ae0 147 return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
90aa39a1
SF
148#else
149 return true;
150#endif
151}
152
4acb54ba
EI
/* End the TB with a (possibly chained) jump to DEST, as exit slot N. */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (dc->base.singlestep_enabled) {
        /* Under single-step, deliver EXCP_DEBUG instead of chaining. */
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
        tcg_gen_movi_i32(cpu_pc, dest);
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else if (use_goto_tb(dc, dest)) {
        /* Same-page destination: emit a chainable goto_tb/exit_tb pair. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        /* Cross-page: set pc and return to the main loop unchained. */
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
170
9ba8cd45
EI
171/*
172 * Returns true if the insn an illegal operation.
173 * If exceptions are enabled, an exception is raised.
174 */
175static bool trap_illegal(DisasContext *dc, bool cond)
176{
2c32179f 177 if (cond && (dc->tb_flags & MSR_EE)
5143fdf3 178 && dc->cpu->cfg.illegal_opcode_exception) {
41ba37c4 179 gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
9ba8cd45
EI
180 }
181 return cond;
182}
183
bdfc1e88
EI
184/*
185 * Returns true if the insn is illegal in userspace.
186 * If exceptions are enabled, an exception is raised.
187 */
188static bool trap_userspace(DisasContext *dc, bool cond)
189{
287b1def 190 bool cond_user = cond && dc->mem_index == MMU_USER_IDX;
bdfc1e88 191
2c32179f 192 if (cond_user && (dc->tb_flags & MSR_EE)) {
41ba37c4 193 gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
bdfc1e88
EI
194 }
195 return cond_user;
196}
197
/* Return the type-B immediate (IMM-prefix aware); type-B insns only. */
static int32_t dec_alu_typeb_imm(DisasContext *dc)
{
    tcg_debug_assert(dc->type_b);
    return typeb_imm(dc, (int16_t)dc->imm);
}

/*
 * Return a pointer to the second ALU operand: cpu_imm (loaded with the
 * effective immediate) for type-B insns, otherwise register rb.
 */
static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        tcg_gen_movi_i32(cpu_imm, dec_alu_typeb_imm(dc));
        return &cpu_imm;
    }
    return &cpu_R[dc->rb];
}

/*
 * Return a TCG value holding register REG for reading.  r0 always reads
 * as zero: a scratch temp is lazily allocated and zeroed, and r0_set
 * remembers that it currently holds zero so it is not re-zeroed.
 */
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

/*
 * Return a TCG value into which register REG may be written.  Writes to
 * r0 land in the scratch temp and are effectively discarded; r0_set is
 * deliberately left false so a later read re-zeroes the temp.
 */
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}
4acb54ba 238
20800179
RH
/*
 * Emit a type-A (register/register) operation via FN.
 * When rd is r0 and the operation has no side effects, the insn is a nop.
 */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

/* As do_typea, but for unary operations (single source register). */
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

/*
 * Type-B operation whose generator FNI takes the immediate as a host
 * constant (tcg_gen_*i_i32 style).
 */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

/*
 * Type-B operation whose generator FN needs the immediate materialized
 * in a TCG temporary (shared with the register-register generators).
 */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_const_i32(arg->imm);

    fn(rd, ra, imm);

    tcg_temp_free_i32(imm);
    return true;
}
303
/*
 * Trans_* boilerplate: each DO_* macro instantiates the decodetree hook
 * trans_NAME by delegating to the matching do_type* helper above.
 * SE marks operations with side effects (must run even when rd == r0);
 * the _CFG variants additionally gate on a CPU configuration flag.
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cpu->cfg.CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cpu->cfg.CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cpu->cfg.CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrappers adding the implicit cpu_env argument for helper calls. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }
339
20800179
RH
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    /* add2 computes a double-word sum; the high word is the carry out. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* First fold in the incoming carry, then add inb; each step may carry. */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
379
cb0a0a4c
RH
/* andni: and with the complement of the immediate. */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

/* Barrel-shift right arithmetic; only the low 5 bits of inb are used. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

/* Barrel-shift right logical; only the low 5 bits of inb are used. */
static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

/* Barrel-shift left; only the low 5 bits of inb are used. */
static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

/* Bit-field extract; out is left unchanged on undefined inputs. */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

/* Bit-field insert into out; out is left unchanged on undefined inputs. */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
455
39cf3864
RH
/* Count leading zeros; clzi with bound 32 yields 32 for ina == 0. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

/* Signed compare: out = inb - ina, with bit 31 forced to (inb < ina). */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

/* Unsigned compare: as gen_cmp but with an unsigned less-than. */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

/* FPU binops/compares go through helpers (flags live in cpu_env). */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

/* flt/fint/fsqrt require the extended FPU (use_fpu >= 2). */
DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
531
e64b2e5c
RH
532static bool trans_imm(DisasContext *dc, arg_imm *arg)
533{
534 dc->ext_imm = arg->imm << 16;
535 tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
536 dc->tb_flags |= IMM_FLAG;
537 dc->clear_imm = 0;
538 return true;
539}
540
97955ceb
RH
/* Signed high-word multiply: keep the upper 32 bits of the product. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* Unsigned high-word multiply. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* Signed x unsigned high-word multiply. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High-word forms require the full multiplier (use_hw_mul >= 2). */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

/* pcmpeq/pcmpne produce a 0/1 result from a full-word comparison. */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
584
a2b0b90e
RH
/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* Reverse subtract: out = inb - ina; carry = no borrow (inb >= ina). */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* inb - ina - !C computed as inb + ~ina + C, carry-propagated. */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)
633
39cf3864
RH
DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

/* Shift right arithmetic by one; shifted-out bit goes to MSR[C]. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* Shift right by one with carry in at the top; old bit 0 to MSR[C]. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Save the incoming carry before clobbering cpu_msr_c. */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}

/* Shift right logical by one; shifted-out bit goes to MSR[C]. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

/* Swap halfwords: a rotate by 16 bits. */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
681
d8e59c4a
RH
/*
 * Compute the effective address ra + rb for a type-A load/store.
 * Returns a new temp owned by the caller (freed by do_load/do_store).
 */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    /* r1 is the stack pointer: run the stack-protection check if enabled. */
    if ((ra == 1 || rb == 1) && dc->cpu->cfg.stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

/* As above for type-B: effective address ra + imm. */
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cpu->cfg.stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

/*
 * Compute the extended address for the *ea insns: ra holds the upper
 * 32 bits and rb the lower, clipped to the configured address size.
 */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cpu->cfg.addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits. */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
751
/*
 * Emit a load of size/endianness MOP into register RD from ADDR.
 * ADDR is consumed (freed here).  REV marks the byte-reversed variants.
 */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    TCGv_i32 v;
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    t_sync_flags(dc);
    sync_jmpstate(dc);

    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    /* TODO: Convert to CPUClass::do_unaligned_access. */
    if (dc->cpu->cfg.unaligned_exceptions && size > MO_8) {
        /* NOTE(review): 0 here vs 1 in do_store — presumably the
           load/store direction flag for memalign; confirm in helper. */
        TCGv_i32 t0 = tcg_const_i32(0);
        TCGv_i32 treg = tcg_const_i32(rd);
        TCGv_i32 tsize = tcg_const_i32((1 << size) - 1);

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        gen_helper_memalign(cpu_env, addr, treg, t0, tsize);

        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);
    }

    /* Writes to r0 are discarded. */
    if (rd) {
        tcg_gen_mov_i32(cpu_R[rd], v);
    }

    tcg_temp_free_i32(v);
    tcg_temp_free(addr);
    return true;
}
807
/*
 * Load insns: plain, byte-reversed (*r), extended-address (*ea, which
 * are privileged and bypass the MMU), and immediate-offset (*i) forms.
 */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

/* Load-exclusive: record the reservation address/value for swx. */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    t_sync_flags(dc);
    sync_jmpstate(dc);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
911
/*
 * Emit a store of register RD (r0 reads as zero) of size/endianness MOP
 * to ADDR.  ADDR is consumed (freed here).  REV marks reversed variants.
 */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    t_sync_flags(dc);
    sync_jmpstate(dc);

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    /* TODO: Convert to CPUClass::do_unaligned_access. */
    if (dc->cpu->cfg.unaligned_exceptions && size > MO_8) {
        TCGv_i32 t1 = tcg_const_i32(1);
        TCGv_i32 treg = tcg_const_i32(rd);
        TCGv_i32 tsize = tcg_const_i32((1 << size) - 1);

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, thay way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, treg, t1, tsize);

        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);
    }

    tcg_temp_free(addr);
    return true;
}
960
/*
 * Store insns, mirroring the load set: plain, byte-reversed (*r),
 * privileged extended-address (*ea), and immediate-offset (*i) forms.
 */
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
1041
/*
 * Store-exclusive: succeeds (MSR[C] = 0) only if the address matches the
 * lwx reservation and the reserved location still holds the lwx value.
 */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    t_sync_flags(dc);
    sync_jmpstate(dc);

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails. On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1094
20800179
RH
1095static bool trans_zero(DisasContext *dc, arg_zero *arg)
1096{
1097 /* If opcode_0_illegal, trap. */
1098 if (dc->cpu->cfg.opcode_0_illegal) {
1099 trap_illegal(dc, true);
1100 return true;
1101 }
1102 /*
1103 * Otherwise, this is "add r0, r0, r0".
1104 * Continue to trans_add so that MSR[C] gets cleared.
1105 */
1106 return false;
4acb54ba
EI
1107}
1108
1074c0fb 1109static void msr_read(DisasContext *dc, TCGv_i32 d)
4acb54ba 1110{
1074c0fb
RH
1111 TCGv_i32 t;
1112
1113 /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
1114 t = tcg_temp_new_i32();
1115 tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
1116 tcg_gen_or_i32(d, cpu_msr, t);
1117 tcg_temp_free_i32(t);
4acb54ba
EI
1118}
1119
1074c0fb 1120static void msr_write(DisasContext *dc, TCGv_i32 v)
4acb54ba
EI
1121{
1122 dc->cpustate_changed = 1;
1074c0fb
RH
1123
1124 /* Install MSR_C. */
1125 tcg_gen_extract_i32(cpu_msr_c, v, 2, 1);
1126
1127 /* Clear MSR_C and MSR_CC; MSR_PVR is not writable, and is always clear. */
1128 tcg_gen_andi_i32(cpu_msr, v, ~(MSR_C | MSR_CC | MSR_PVR));
4acb54ba
EI
1129}
1130
1131static void dec_msr(DisasContext *dc)
1132{
0063ebd6 1133 CPUState *cs = CPU(dc->cpu);
cfeea807 1134 TCGv_i32 t0, t1;
2023e9a3 1135 unsigned int sr, rn;
f0f7e7f7 1136 bool to, clrset, extended = false;
4acb54ba 1137
2023e9a3
EI
1138 sr = extract32(dc->imm, 0, 14);
1139 to = extract32(dc->imm, 14, 1);
1140 clrset = extract32(dc->imm, 15, 1) == 0;
4acb54ba 1141 dc->type_b = 1;
2023e9a3 1142 if (to) {
4acb54ba 1143 dc->cpustate_changed = 1;
f0f7e7f7
EI
1144 }
1145
1146 /* Extended MSRs are only available if addr_size > 32. */
1147 if (dc->cpu->cfg.addr_size > 32) {
1148 /* The E-bit is encoded differently for To/From MSR. */
1149 static const unsigned int e_bit[] = { 19, 24 };
1150
1151 extended = extract32(dc->imm, e_bit[to], 1);
2023e9a3 1152 }
4acb54ba
EI
1153
1154 /* msrclr and msrset. */
2023e9a3
EI
1155 if (clrset) {
1156 bool clr = extract32(dc->ir, 16, 1);
4acb54ba 1157
56837509 1158 if (!dc->cpu->cfg.use_msr_instr) {
1567a005
EI
1159 /* nop??? */
1160 return;
1161 }
1162
bdfc1e88 1163 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
1567a005
EI
1164 return;
1165 }
1166
4acb54ba
EI
1167 if (dc->rd)
1168 msr_read(dc, cpu_R[dc->rd]);
1169
cfeea807
EI
1170 t0 = tcg_temp_new_i32();
1171 t1 = tcg_temp_new_i32();
4acb54ba 1172 msr_read(dc, t0);
cfeea807 1173 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
4acb54ba
EI
1174
1175 if (clr) {
cfeea807
EI
1176 tcg_gen_not_i32(t1, t1);
1177 tcg_gen_and_i32(t0, t0, t1);
4acb54ba 1178 } else
cfeea807 1179 tcg_gen_or_i32(t0, t0, t1);
4acb54ba 1180 msr_write(dc, t0);
cfeea807
EI
1181 tcg_temp_free_i32(t0);
1182 tcg_temp_free_i32(t1);
d4705ae0
RH
1183 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
1184 dc->base.is_jmp = DISAS_UPDATE;
4acb54ba
EI
1185 return;
1186 }
1187
bdfc1e88
EI
1188 if (trap_userspace(dc, to)) {
1189 return;
1567a005
EI
1190 }
1191
4acb54ba
EI
1192#if !defined(CONFIG_USER_ONLY)
1193 /* Catch read/writes to the mmu block. */
1194 if ((sr & ~0xff) == 0x1000) {
f0f7e7f7 1195 TCGv_i32 tmp_ext = tcg_const_i32(extended);
05a9a651
EI
1196 TCGv_i32 tmp_sr;
1197
4acb54ba 1198 sr &= 7;
05a9a651 1199 tmp_sr = tcg_const_i32(sr);
05a9a651 1200 if (to) {
f0f7e7f7 1201 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
05a9a651 1202 } else {
f0f7e7f7 1203 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
05a9a651
EI
1204 }
1205 tcg_temp_free_i32(tmp_sr);
f0f7e7f7 1206 tcg_temp_free_i32(tmp_ext);
4acb54ba
EI
1207 return;
1208 }
1209#endif
1210
1211 if (to) {
4acb54ba 1212 switch (sr) {
aa28e6d4 1213 case SR_PC:
4acb54ba 1214 break;
aa28e6d4 1215 case SR_MSR:
4acb54ba
EI
1216 msr_write(dc, cpu_R[dc->ra]);
1217 break;
351527b7 1218 case SR_EAR:
dbdb77c4
RH
1219 {
1220 TCGv_i64 t64 = tcg_temp_new_i64();
1221 tcg_gen_extu_i32_i64(t64, cpu_R[dc->ra]);
1222 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUMBState, ear));
1223 tcg_temp_free_i64(t64);
1224 }
aa28e6d4 1225 break;
351527b7 1226 case SR_ESR:
41ba37c4
RH
1227 tcg_gen_st_i32(cpu_R[dc->ra],
1228 cpu_env, offsetof(CPUMBState, esr));
aa28e6d4 1229 break;
ab6dd380 1230 case SR_FSR:
86017ccf
RH
1231 tcg_gen_st_i32(cpu_R[dc->ra],
1232 cpu_env, offsetof(CPUMBState, fsr));
aa28e6d4
RH
1233 break;
1234 case SR_BTR:
ccf628b7
RH
1235 tcg_gen_st_i32(cpu_R[dc->ra],
1236 cpu_env, offsetof(CPUMBState, btr));
aa28e6d4
RH
1237 break;
1238 case SR_EDR:
39db007e
RH
1239 tcg_gen_st_i32(cpu_R[dc->ra],
1240 cpu_env, offsetof(CPUMBState, edr));
4acb54ba 1241 break;
5818dee5 1242 case 0x800:
cfeea807
EI
1243 tcg_gen_st_i32(cpu_R[dc->ra],
1244 cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
1245 break;
1246 case 0x802:
cfeea807
EI
1247 tcg_gen_st_i32(cpu_R[dc->ra],
1248 cpu_env, offsetof(CPUMBState, shr));
5818dee5 1249 break;
4acb54ba 1250 default:
0063ebd6 1251 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
4acb54ba
EI
1252 break;
1253 }
1254 } else {
4acb54ba 1255 switch (sr) {
aa28e6d4 1256 case SR_PC:
d4705ae0 1257 tcg_gen_movi_i32(cpu_R[dc->rd], dc->base.pc_next);
4acb54ba 1258 break;
aa28e6d4 1259 case SR_MSR:
4acb54ba
EI
1260 msr_read(dc, cpu_R[dc->rd]);
1261 break;
351527b7 1262 case SR_EAR:
dbdb77c4
RH
1263 {
1264 TCGv_i64 t64 = tcg_temp_new_i64();
1265 tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
1266 if (extended) {
1267 tcg_gen_extrh_i64_i32(cpu_R[dc->rd], t64);
1268 } else {
1269 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], t64);
1270 }
1271 tcg_temp_free_i64(t64);
a1b48e3a 1272 }
aa28e6d4 1273 break;
351527b7 1274 case SR_ESR:
41ba37c4
RH
1275 tcg_gen_ld_i32(cpu_R[dc->rd],
1276 cpu_env, offsetof(CPUMBState, esr));
aa28e6d4 1277 break;
351527b7 1278 case SR_FSR:
86017ccf
RH
1279 tcg_gen_ld_i32(cpu_R[dc->rd],
1280 cpu_env, offsetof(CPUMBState, fsr));
aa28e6d4 1281 break;
351527b7 1282 case SR_BTR:
ccf628b7
RH
1283 tcg_gen_ld_i32(cpu_R[dc->rd],
1284 cpu_env, offsetof(CPUMBState, btr));
aa28e6d4 1285 break;
7cdae31d 1286 case SR_EDR:
39db007e
RH
1287 tcg_gen_ld_i32(cpu_R[dc->rd],
1288 cpu_env, offsetof(CPUMBState, edr));
4acb54ba 1289 break;
5818dee5 1290 case 0x800:
cfeea807
EI
1291 tcg_gen_ld_i32(cpu_R[dc->rd],
1292 cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
1293 break;
1294 case 0x802:
cfeea807
EI
1295 tcg_gen_ld_i32(cpu_R[dc->rd],
1296 cpu_env, offsetof(CPUMBState, shr));
5818dee5 1297 break;
351527b7 1298 case 0x2000 ... 0x200c:
4acb54ba 1299 rn = sr & 0xf;
cfeea807 1300 tcg_gen_ld_i32(cpu_R[dc->rd],
68cee38a 1301 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
4acb54ba
EI
1302 break;
1303 default:
a47dddd7 1304 cpu_abort(cs, "unknown mfs reg %x\n", sr);
4acb54ba
EI
1305 break;
1306 }
1307 }
ee7dbcf8
EI
1308
1309 if (dc->rd == 0) {
cfeea807 1310 tcg_gen_movi_i32(cpu_R[0], 0);
ee7dbcf8 1311 }
4acb54ba
EI
1312}
1313
4acb54ba 1314static inline void eval_cc(DisasContext *dc, unsigned int cc,
9e6e1828 1315 TCGv_i32 d, TCGv_i32 a)
4acb54ba 1316{
d89b86e9
EI
1317 static const int mb_to_tcg_cc[] = {
1318 [CC_EQ] = TCG_COND_EQ,
1319 [CC_NE] = TCG_COND_NE,
1320 [CC_LT] = TCG_COND_LT,
1321 [CC_LE] = TCG_COND_LE,
1322 [CC_GE] = TCG_COND_GE,
1323 [CC_GT] = TCG_COND_GT,
1324 };
1325
4acb54ba 1326 switch (cc) {
d89b86e9
EI
1327 case CC_EQ:
1328 case CC_NE:
1329 case CC_LT:
1330 case CC_LE:
1331 case CC_GE:
1332 case CC_GT:
9e6e1828 1333 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
d89b86e9
EI
1334 break;
1335 default:
1336 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1337 break;
4acb54ba
EI
1338 }
1339}
1340
0f96e96b 1341static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
4acb54ba 1342{
0f96e96b 1343 TCGv_i32 zero = tcg_const_i32(0);
e956caf2 1344
0f96e96b 1345 tcg_gen_movcond_i32(TCG_COND_NE, cpu_pc,
9b158558 1346 cpu_btaken, zero,
e956caf2
EI
1347 pc_true, pc_false);
1348
0f96e96b 1349 tcg_temp_free_i32(zero);
4acb54ba
EI
1350}
1351
f91c60f0
EI
1352static void dec_setup_dslot(DisasContext *dc)
1353{
7b34f45f
RH
1354 dc->delayed_branch = 2;
1355 dc->tb_flags |= D_FLAG;
1356 if (dc->type_b && (dc->tb_flags & IMM_FLAG)) {
1357 dc->tb_flags |= BIMM_FLAG;
1358 }
f91c60f0
EI
1359}
1360
4acb54ba
EI
1361static void dec_bcc(DisasContext *dc)
1362{
1363 unsigned int cc;
1364 unsigned int dslot;
1365
1366 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1367 dslot = dc->ir & (1 << 25);
4acb54ba
EI
1368
1369 dc->delayed_branch = 1;
1370 if (dslot) {
f91c60f0 1371 dec_setup_dslot(dc);
4acb54ba
EI
1372 }
1373
d7ecb757 1374 if (dc->type_b) {
844bab60 1375 dc->jmp = JMP_DIRECT_CC;
d7ecb757
RH
1376 dc->jmp_pc = dc->base.pc_next + dec_alu_typeb_imm(dc);
1377 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
61204ce8 1378 } else {
23979dc5 1379 dc->jmp = JMP_INDIRECT;
d7ecb757 1380 tcg_gen_addi_i32(cpu_btarget, cpu_R[dc->rb], dc->base.pc_next);
61204ce8 1381 }
9b158558 1382 eval_cc(dc, cc, cpu_btaken, cpu_R[dc->ra]);
4acb54ba
EI
1383}
1384
1385static void dec_br(DisasContext *dc)
1386{
9f6113c7 1387 unsigned int dslot, link, abs, mbar;
4acb54ba
EI
1388
1389 dslot = dc->ir & (1 << 20);
1390 abs = dc->ir & (1 << 19);
1391 link = dc->ir & (1 << 18);
9f6113c7
EI
1392
1393 /* Memory barrier. */
1394 mbar = (dc->ir >> 16) & 31;
1395 if (mbar == 2 && dc->imm == 4) {
badcbf9d
EI
1396 uint16_t mbar_imm = dc->rd;
1397
3f172744
EI
1398 /* Data access memory barrier. */
1399 if ((mbar_imm & 2) == 0) {
1400 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1401 }
1402
5d45de97 1403 /* mbar IMM & 16 decodes to sleep. */
badcbf9d 1404 if (mbar_imm & 16) {
41ba37c4 1405 TCGv_i32 tmp_1;
5d45de97 1406
b4919e7d
EI
1407 if (trap_userspace(dc, true)) {
1408 /* Sleep is a privileged instruction. */
1409 return;
1410 }
1411
5d45de97 1412 t_sync_flags(dc);
41ba37c4
RH
1413
1414 tmp_1 = tcg_const_i32(1);
5d45de97
EI
1415 tcg_gen_st_i32(tmp_1, cpu_env,
1416 -offsetof(MicroBlazeCPU, env)
1417 +offsetof(CPUState, halted));
5d45de97 1418 tcg_temp_free_i32(tmp_1);
41ba37c4 1419
d4705ae0 1420 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
41ba37c4
RH
1421
1422 gen_raise_exception(dc, EXCP_HLT);
5d45de97
EI
1423 return;
1424 }
9f6113c7
EI
1425 /* Break the TB. */
1426 dc->cpustate_changed = 1;
1427 return;
1428 }
1429
d7ecb757
RH
1430 if (abs && link && !dslot) {
1431 if (dc->type_b) {
1432 /* BRKI */
1433 uint32_t imm = dec_alu_typeb_imm(dc);
1434 if (trap_userspace(dc, imm != 8 && imm != 0x18)) {
1435 return;
1436 }
1437 } else {
1438 /* BRK */
1439 if (trap_userspace(dc, true)) {
1440 return;
1441 }
1442 }
1443 }
1444
4acb54ba
EI
1445 dc->delayed_branch = 1;
1446 if (dslot) {
f91c60f0 1447 dec_setup_dslot(dc);
4acb54ba 1448 }
d7ecb757 1449 if (link && dc->rd) {
d4705ae0 1450 tcg_gen_movi_i32(cpu_R[dc->rd], dc->base.pc_next);
d7ecb757 1451 }
4acb54ba 1452
4acb54ba 1453 if (abs) {
d7ecb757
RH
1454 if (dc->type_b) {
1455 uint32_t dest = dec_alu_typeb_imm(dc);
1456
1457 dc->jmp = JMP_DIRECT;
1458 dc->jmp_pc = dest;
1459 tcg_gen_movi_i32(cpu_btarget, dest);
1460 if (link && !dslot) {
1461 switch (dest) {
1462 case 8:
1463 case 0x18:
1464 gen_raise_exception_sync(dc, EXCP_BREAK);
1465 break;
1466 case 0:
1467 gen_raise_exception_sync(dc, EXCP_DEBUG);
1468 break;
ff21f70a 1469 }
ff21f70a 1470 }
61204ce8 1471 } else {
d7ecb757
RH
1472 dc->jmp = JMP_INDIRECT;
1473 tcg_gen_mov_i32(cpu_btarget, cpu_R[dc->rb]);
1474 if (link && !dslot) {
1475 gen_raise_exception_sync(dc, EXCP_BREAK);
1476 }
4acb54ba 1477 }
d7ecb757
RH
1478 } else if (dc->type_b) {
1479 dc->jmp = JMP_DIRECT;
1480 dc->jmp_pc = dc->base.pc_next + dec_alu_typeb_imm(dc);
1481 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
1482 } else {
1483 dc->jmp = JMP_INDIRECT;
1484 tcg_gen_addi_i32(cpu_btarget, cpu_R[dc->rb], dc->base.pc_next);
4acb54ba 1485 }
d7ecb757 1486 tcg_gen_movi_i32(cpu_btaken, 1);
4acb54ba
EI
1487}
1488
1489static inline void do_rti(DisasContext *dc)
1490{
cfeea807
EI
1491 TCGv_i32 t0, t1;
1492 t0 = tcg_temp_new_i32();
1493 t1 = tcg_temp_new_i32();
3e0e16ae 1494 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf
EI
1495 tcg_gen_shri_i32(t0, t1, 1);
1496 tcg_gen_ori_i32(t1, t1, MSR_IE);
cfeea807
EI
1497 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1498
1499 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1500 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1501 msr_write(dc, t1);
cfeea807
EI
1502 tcg_temp_free_i32(t1);
1503 tcg_temp_free_i32(t0);
4acb54ba
EI
1504 dc->tb_flags &= ~DRTI_FLAG;
1505}
1506
1507static inline void do_rtb(DisasContext *dc)
1508{
cfeea807
EI
1509 TCGv_i32 t0, t1;
1510 t0 = tcg_temp_new_i32();
1511 t1 = tcg_temp_new_i32();
3e0e16ae 1512 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf 1513 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
cfeea807
EI
1514 tcg_gen_shri_i32(t0, t1, 1);
1515 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1516
1517 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1518 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1519 msr_write(dc, t1);
cfeea807
EI
1520 tcg_temp_free_i32(t1);
1521 tcg_temp_free_i32(t0);
4acb54ba
EI
1522 dc->tb_flags &= ~DRTB_FLAG;
1523}
1524
1525static inline void do_rte(DisasContext *dc)
1526{
cfeea807
EI
1527 TCGv_i32 t0, t1;
1528 t0 = tcg_temp_new_i32();
1529 t1 = tcg_temp_new_i32();
4acb54ba 1530
3e0e16ae 1531 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf 1532 tcg_gen_ori_i32(t1, t1, MSR_EE);
cfeea807
EI
1533 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1534 tcg_gen_shri_i32(t0, t1, 1);
1535 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
4acb54ba 1536
cfeea807
EI
1537 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1538 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1539 msr_write(dc, t1);
cfeea807
EI
1540 tcg_temp_free_i32(t1);
1541 tcg_temp_free_i32(t0);
4acb54ba
EI
1542 dc->tb_flags &= ~DRTE_FLAG;
1543}
1544
1545static void dec_rts(DisasContext *dc)
1546{
1547 unsigned int b_bit, i_bit, e_bit;
1548
1549 i_bit = dc->ir & (1 << 21);
1550 b_bit = dc->ir & (1 << 22);
1551 e_bit = dc->ir & (1 << 23);
1552
bdfc1e88
EI
1553 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1554 return;
1555 }
1556
f91c60f0 1557 dec_setup_dslot(dc);
4acb54ba
EI
1558
1559 if (i_bit) {
4acb54ba
EI
1560 dc->tb_flags |= DRTI_FLAG;
1561 } else if (b_bit) {
4acb54ba
EI
1562 dc->tb_flags |= DRTB_FLAG;
1563 } else if (e_bit) {
4acb54ba 1564 dc->tb_flags |= DRTE_FLAG;
11105d67 1565 }
4acb54ba 1566
23979dc5 1567 dc->jmp = JMP_INDIRECT;
9b158558 1568 tcg_gen_movi_i32(cpu_btaken, 1);
0f96e96b 1569 tcg_gen_add_i32(cpu_btarget, cpu_R[dc->ra], *dec_alu_op_b(dc));
4acb54ba
EI
1570}
1571
1572static void dec_null(DisasContext *dc)
1573{
9ba8cd45 1574 if (trap_illegal(dc, true)) {
02b33596
EI
1575 return;
1576 }
d4705ae0
RH
1577 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
1578 (uint32_t)dc->base.pc_next, dc->opcode);
4acb54ba
EI
1579 dc->abort_at_next_insn = 1;
1580}
1581
6d76d23e
EI
1582/* Insns connected to FSL or AXI stream attached devices. */
1583static void dec_stream(DisasContext *dc)
1584{
6d76d23e
EI
1585 TCGv_i32 t_id, t_ctrl;
1586 int ctrl;
1587
bdfc1e88 1588 if (trap_userspace(dc, true)) {
6d76d23e
EI
1589 return;
1590 }
1591
cfeea807 1592 t_id = tcg_temp_new_i32();
6d76d23e 1593 if (dc->type_b) {
cfeea807 1594 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
6d76d23e
EI
1595 ctrl = dc->imm >> 10;
1596 } else {
cfeea807 1597 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
6d76d23e
EI
1598 ctrl = dc->imm >> 5;
1599 }
1600
cfeea807 1601 t_ctrl = tcg_const_i32(ctrl);
6d76d23e
EI
1602
1603 if (dc->rd == 0) {
1604 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1605 } else {
1606 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1607 }
cfeea807
EI
1608 tcg_temp_free_i32(t_id);
1609 tcg_temp_free_i32(t_ctrl);
6d76d23e
EI
1610}
1611
4acb54ba
EI
1612static struct decoder_info {
1613 struct {
1614 uint32_t bits;
1615 uint32_t mask;
1616 };
1617 void (*dec)(DisasContext *dc);
1618} decinfo[] = {
4acb54ba
EI
1619 {DEC_BR, dec_br},
1620 {DEC_BCC, dec_bcc},
1621 {DEC_RTS, dec_rts},
4acb54ba 1622 {DEC_MSR, dec_msr},
6d76d23e 1623 {DEC_STREAM, dec_stream},
4acb54ba
EI
1624 {{0, 0}, dec_null}
1625};
1626
44d1432b 1627static void old_decode(DisasContext *dc, uint32_t ir)
4acb54ba 1628{
4acb54ba
EI
1629 int i;
1630
64254eba 1631 dc->ir = ir;
4acb54ba 1632
4acb54ba
EI
1633 /* bit 2 seems to indicate insn type. */
1634 dc->type_b = ir & (1 << 29);
1635
1636 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1637 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1638 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1639 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1640 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1641
1642 /* Large switch for all insns. */
1643 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1644 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1645 decinfo[i].dec(dc);
1646 break;
1647 }
1648 }
1649}
1650
372122e3 1651static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
4acb54ba 1652{
372122e3
RH
1653 DisasContext *dc = container_of(dcb, DisasContext, base);
1654 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1655 int bound;
4acb54ba 1656
372122e3
RH
1657 dc->cpu = cpu;
1658 dc->synced_flags = dc->tb_flags = dc->base.tb->flags;
4acb54ba 1659 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
372122e3 1660 dc->jmp = dc->delayed_branch ? JMP_INDIRECT : JMP_NOJMP;
4acb54ba
EI
1661 dc->cpustate_changed = 0;
1662 dc->abort_at_next_insn = 0;
d7ecb757 1663 dc->ext_imm = dc->base.tb->cs_base;
20800179
RH
1664 dc->r0 = NULL;
1665 dc->r0_set = false;
287b1def 1666 dc->mem_index = cpu_mmu_index(&cpu->env, false);
4acb54ba 1667
372122e3
RH
1668 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
1669 dc->base.max_insns = MIN(dc->base.max_insns, bound);
1670}
4acb54ba 1671
372122e3
RH
1672static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
1673{
1674}
4acb54ba 1675
372122e3
RH
1676static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
1677{
1678 tcg_gen_insn_start(dcb->pc_next);
1679}
4acb54ba 1680
372122e3
RH
1681static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
1682 const CPUBreakpoint *bp)
1683{
1684 DisasContext *dc = container_of(dcb, DisasContext, base);
b933066a 1685
372122e3 1686 gen_raise_exception_sync(dc, EXCP_DEBUG);
4acb54ba 1687
372122e3
RH
1688 /*
1689 * The address covered by the breakpoint must be included in
1690 * [tb->pc, tb->pc + tb->size) in order to for it to be
1691 * properly cleared -- thus we increment the PC here so that
1692 * the logic setting tb->size below does the right thing.
1693 */
1694 dc->base.pc_next += 4;
1695 return true;
1696}
4acb54ba 1697
372122e3
RH
1698static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
1699{
1700 DisasContext *dc = container_of(dcb, DisasContext, base);
1701 CPUMBState *env = cs->env_ptr;
44d1432b 1702 uint32_t ir;
372122e3
RH
1703
1704 /* TODO: This should raise an exception, not terminate qemu. */
1705 if (dc->base.pc_next & 3) {
1706 cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
1707 (uint32_t)dc->base.pc_next);
1708 }
1709
1710 dc->clear_imm = 1;
44d1432b
RH
1711 ir = cpu_ldl_code(env, dc->base.pc_next);
1712 if (!decode(dc, ir)) {
1713 old_decode(dc, ir);
1714 }
20800179
RH
1715
1716 if (dc->r0) {
1717 tcg_temp_free_i32(dc->r0);
1718 dc->r0 = NULL;
1719 dc->r0_set = false;
1720 }
1721
d7ecb757 1722 if (dc->clear_imm && (dc->tb_flags & IMM_FLAG)) {
372122e3 1723 dc->tb_flags &= ~IMM_FLAG;
d7ecb757 1724 tcg_gen_discard_i32(cpu_imm);
372122e3
RH
1725 }
1726 dc->base.pc_next += 4;
1727
1728 if (dc->delayed_branch && --dc->delayed_branch == 0) {
1729 if (dc->tb_flags & DRTI_FLAG) {
1730 do_rti(dc);
4acb54ba 1731 }
372122e3
RH
1732 if (dc->tb_flags & DRTB_FLAG) {
1733 do_rtb(dc);
ed2803da 1734 }
372122e3
RH
1735 if (dc->tb_flags & DRTE_FLAG) {
1736 do_rte(dc);
1737 }
1738 /* Clear the delay slot flag. */
1739 dc->tb_flags &= ~D_FLAG;
1740 dc->base.is_jmp = DISAS_JUMP;
4acb54ba
EI
1741 }
1742
372122e3
RH
1743 /* Force an exit if the per-tb cpu state has changed. */
1744 if (dc->base.is_jmp == DISAS_NEXT && dc->cpustate_changed) {
d4705ae0 1745 dc->base.is_jmp = DISAS_UPDATE;
372122e3 1746 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
4acb54ba 1747 }
372122e3
RH
1748}
1749
1750static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
1751{
1752 DisasContext *dc = container_of(dcb, DisasContext, base);
1753
1754 assert(!dc->abort_at_next_insn);
4acb54ba 1755
d4705ae0 1756 if (dc->base.is_jmp == DISAS_NORETURN) {
372122e3
RH
1757 /* We have already exited the TB. */
1758 return;
1759 }
1760
1761 t_sync_flags(dc);
1762 if (dc->tb_flags & D_FLAG) {
1763 sync_jmpstate(dc);
1764 dc->jmp = JMP_NOJMP;
1765 }
1766
1767 switch (dc->base.is_jmp) {
1768 case DISAS_TOO_MANY:
1769 assert(dc->jmp == JMP_NOJMP);
1770 gen_goto_tb(dc, 0, dc->base.pc_next);
1771 return;
6c5f738d 1772
372122e3
RH
1773 case DISAS_UPDATE:
1774 assert(dc->jmp == JMP_NOJMP);
1775 if (unlikely(cs->singlestep_enabled)) {
1776 gen_raise_exception(dc, EXCP_DEBUG);
1777 } else {
1778 tcg_gen_exit_tb(NULL, 0);
6c5f738d 1779 }
372122e3
RH
1780 return;
1781
1782 case DISAS_JUMP:
1783 switch (dc->jmp) {
1784 case JMP_INDIRECT:
1785 {
1786 TCGv_i32 tmp_pc = tcg_const_i32(dc->base.pc_next);
1787 eval_cond_jmp(dc, cpu_btarget, tmp_pc);
1788 tcg_temp_free_i32(tmp_pc);
1789
1790 if (unlikely(cs->singlestep_enabled)) {
1791 gen_raise_exception(dc, EXCP_DEBUG);
1792 } else {
1793 tcg_gen_exit_tb(NULL, 0);
1794 }
1795 }
1796 return;
1797
1798 case JMP_DIRECT_CC:
1799 {
1800 TCGLabel *l1 = gen_new_label();
1801 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_btaken, 0, l1);
1802 gen_goto_tb(dc, 1, dc->base.pc_next);
1803 gen_set_label(l1);
1804 }
1805 /* fall through */
1806
1807 case JMP_DIRECT:
1808 gen_goto_tb(dc, 0, dc->jmp_pc);
1809 return;
4acb54ba 1810 }
372122e3 1811 /* fall through */
0a7df5da 1812
372122e3
RH
1813 default:
1814 g_assert_not_reached();
1815 }
1816}
4acb54ba 1817
372122e3
RH
1818static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
1819{
372122e3
RH
1820 qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
1821 log_target_disas(cs, dcb->pc_first, dcb->tb->size);
372122e3
RH
1822}
1823
1824static const TranslatorOps mb_tr_ops = {
1825 .init_disas_context = mb_tr_init_disas_context,
1826 .tb_start = mb_tr_tb_start,
1827 .insn_start = mb_tr_insn_start,
1828 .breakpoint_check = mb_tr_breakpoint_check,
1829 .translate_insn = mb_tr_translate_insn,
1830 .tb_stop = mb_tr_tb_stop,
1831 .disas_log = mb_tr_disas_log,
1832};
1833
1834void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
1835{
1836 DisasContext dc;
1837 translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
4acb54ba
EI
1838}
1839
90c84c56 1840void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
4acb54ba 1841{
878096ee
AF
1842 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1843 CPUMBState *env = &cpu->env;
4acb54ba
EI
1844 int i;
1845
90c84c56 1846 if (!env) {
4acb54ba 1847 return;
90c84c56 1848 }
4acb54ba 1849
0f96e96b 1850 qemu_fprintf(f, "IN: PC=%x %s\n",
76e8187d 1851 env->pc, lookup_symbol(env->pc));
6efd5599 1852 qemu_fprintf(f, "rmsr=%x resr=%x rear=%" PRIx64 " "
eb2022b7 1853 "imm=%x iflags=%x fsr=%x rbtr=%x\n",
78e9caf2 1854 env->msr, env->esr, env->ear,
eb2022b7 1855 env->imm, env->iflags, env->fsr, env->btr);
0f96e96b 1856 qemu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
90c84c56 1857 env->btaken, env->btarget,
2e5282ca
RH
1858 (env->msr & MSR_UM) ? "user" : "kernel",
1859 (env->msr & MSR_UMS) ? "user" : "kernel",
1860 (bool)(env->msr & MSR_EIP),
1861 (bool)(env->msr & MSR_IE));
2ead1b18
JK
1862 for (i = 0; i < 12; i++) {
1863 qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
1864 if ((i + 1) % 4 == 0) {
1865 qemu_fprintf(f, "\n");
1866 }
1867 }
17c52a43 1868
2ead1b18 1869 /* Registers that aren't modeled are reported as 0 */
39db007e 1870 qemu_fprintf(f, "redr=%x rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
af20a93a 1871 "rtlblo=0 rtlbhi=0\n", env->edr);
2ead1b18 1872 qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
4acb54ba 1873 for (i = 0; i < 32; i++) {
90c84c56 1874 qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
4acb54ba 1875 if ((i + 1) % 4 == 0)
90c84c56 1876 qemu_fprintf(f, "\n");
4acb54ba 1877 }
90c84c56 1878 qemu_fprintf(f, "\n\n");
4acb54ba
EI
1879}
1880
cd0c24f9
AF
1881void mb_tcg_init(void)
1882{
480d29a8
RH
1883#define R(X) { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
1884#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }
1885
1886 static const struct {
1887 TCGv_i32 *var; int ofs; char name[8];
1888 } i32s[] = {
1889 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
1890 R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
1891 R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
1892 R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
1893
1894 SP(pc),
1895 SP(msr),
1074c0fb 1896 SP(msr_c),
480d29a8
RH
1897 SP(imm),
1898 SP(iflags),
1899 SP(btaken),
1900 SP(btarget),
1901 SP(res_val),
1902 };
1903
1904#undef R
1905#undef SP
1906
1907 for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
1908 *i32s[i].var =
1909 tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
1910 }
4acb54ba 1911
480d29a8
RH
1912 cpu_res_addr =
1913 tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
4acb54ba
EI
1914}
1915
bad729e2
RH
1916void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1917 target_ulong *data)
4acb54ba 1918{
76e8187d 1919 env->pc = data[0];
4acb54ba 1920}