]> git.proxmox.com Git - mirror_qemu.git/blame - target/microblaze/translate.c
iotests: Fix test 200 on s390x without virtio-pci
[mirror_qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
4acb54ba 25#include "tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
4acb54ba 27#include "microblaze-decode.h"
f08b6170 28#include "exec/cpu_ldst.h"
2ef6175a 29#include "exec/helper-gen.h"
77fc6f5e 30#include "exec/translator.h"
4acb54ba 31
a7e30d84 32#include "trace-tcg.h"
508127e2 33#include "exec/log.h"
a7e30d84
LV
34
35
4acb54ba
EI
#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
/* Disassembly trace of translated guest insns, gated on -d in_asm.  */
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

/* Debug-only statement wrapper; expands to nothing.  */
#define D(x)

/* Extract the inclusive bit field [start, end] from src.  */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
49
77fc6f5e
LV
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

/* TCG globals mapping guest CPU state; allocated once at init time.  */
static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];      /* General purpose registers r0..r31.  */
static TCGv_i64 cpu_SR[14];     /* Special registers (PC, MSR, EAR, ...).  */
static TCGv_i32 env_imm;        /* Accumulated immediate from imm prefix.  */
static TCGv_i32 env_btaken;     /* Non-zero when a pending branch is taken.  */
static TCGv_i64 env_btarget;    /* Pending branch target address.  */
static TCGv_i32 env_iflags;     /* Runtime copy of the tb-dependent flags.  */
static TCGv env_res_addr;       /* lwx/swx reservation address.  */
static TCGv_i32 env_res_val;    /* lwx/swx reservation value.  */

#include "exec/gen-icount.h"
4acb54ba
EI
66
/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;                /* Guest address of the insn being decoded.  */

    /* Decoder.  */
    int type_b;                 /* Type B insn: operand b is an immediate.  */
    uint32_t ir;                /* Raw 32-bit instruction word.  */
    uint8_t opcode;
    uint8_t rd, ra, rb;         /* Destination and source register fields.  */
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;     /* Clear IMM_FLAG after this insn.  */
    int is_jmp;

/* Values for the jmp field below.  */
#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;            /* Static branch target for JMP_DIRECT*.  */

    int abort_at_next_insn;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
96
/* Printable names for the 32 general purpose registers (debug dumps).  */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

/* Printable names for the special registers, indexed like cpu_SR[].  */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};
110
4acb54ba
EI
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
119
/* Emit code to raise exception 'index' at the current insn address.  */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    /* Flags and PC must be up to date before entering the helper.  */
    t_sync_flags(dc);
    tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
130
90aa39a1
SF
/* Direct tb chaining is only safe when the destination stays within the
   same guest page as the current tb (system emulation).  */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
139
4acb54ba
EI
/* Emit a jump to 'dest', chaining tbs via slot n when allowed.  */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}
151
/* Read the MSR carry-copy bit (MSR bit 31) into d as 0 or 1.  */
static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(d, d, 31);
}
157
04ec7df7
EI
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t0, v);
    /* Deposit bit 0 into MSR_C and the alias MSR_CC.  */
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
    tcg_temp_free_i64(t0);
}
171
/* Convenience wrapper: set the MSR carry bits to a constant 0 or 1.  */
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}
179
9ba8cd45
EI
/*
 * Returns true if the insn an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    /* Only raise if MSR.EE is set and the core implements the
       illegal-opcode exception (PVR2 bit).  */
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}
193
bdfc1e88
EI
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}
209
61204ce8
EI
/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
217
/* Return a pointer to the TCG value holding ALU operand b: either the
   (possibly imm-prefixed) sign-extended immediate, or register rb.  */
static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            /* An imm prefix supplied the upper 16 bits; OR in the rest.  */
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
229
/* Decode add/addc/addk/addkc and their immediate forms.  */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;         /* k - keep carry (do not update MSR).  */
    c = dc->opcode & 2;         /* c - add carry into the result.  */

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        /* Compute the new carry before clobbering rd (ra may alias rd).  */
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        /* rd == r0: discard the sum but still update the carry flag.  */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}
283
/* Decode sub/subc/subk/subkc, rsub* immediate forms, and cmp/cmpu.  */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;            /* u - unsigned compare.  */
    k = dc->opcode & 4;         /* k - keep carry.  */
    c = dc->opcode & 2;         /* c - add carry into the result.  */
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            /* Note operand order: rsub semantics, d = b - a.  */
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        /* Compute the new carry before clobbering rd.  */
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        /* rd == r0: result discarded, carry flag still updated.  */
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
354
/* Decode the pattern-compare insns pcmpbf/pcmpeq/pcmpne.  */
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
    case 0:
        /* pcmpbf. */
        LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd)
            gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        /* pcmpeq: rd = (ra == rb).  */
        LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    case 3:
        /* pcmpne: rd = (ra != rb).  */
        LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    default:
        cpu_abort(CPU(dc->cpu),
                  "unsupported pattern insn opcode=%x\n", dc->opcode);
        break;
    }
}
391
/* Decode and/andn; bit 10 of a register-form insn selects pcmp instead.  */
static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    /* rd == r0 is a nop.  */
    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
412
/* Decode or; bit 10 of a register-form insn selects pcmp instead.  */
static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
424
/* Decode xor; bit 10 of a register-form insn selects pcmp instead.  */
static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
436
/* Read the low 32 bits of MSR into d.  */
static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
}
441
/* Write v into MSR, preserving the read-only PVR bit.  */
static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t;

    t = tcg_temp_new_i64();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_andi_i64(t, t, ~MSR_PVR);
    tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i64(t);
}
455
/* Decode the special-register move insns: msrclr/msrset and mts/mfs.  */
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);     /* Special-register number.  */
    to = extract32(dc->imm, 14, 1);     /* 1 = mts (write), 0 = mfs (read).  */
    clrset = extract32(dc->imm, 15, 1) == 0;    /* msrclr/msrset form.  */
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    /* msrclr and msrset.  */
    if (clrset) {
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        /* Userspace may only touch the C bit (imm 4) or nothing (imm 0).  */
        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        /* rd receives the pre-modification MSR value.  */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        /* End the tb: MSR changes can affect subsequent translation.  */
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);
        TCGv_i32 tmp_sr;

        sr &= 7;
        tmp_sr = tcg_const_i32(sr);
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        }
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);
        return;
    }
#endif

    if (to) {
        /* mts: move register ra to special register sr.  */
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
        case 0:
            break;
        case 1:
            msr_write(dc, cpu_R[dc->ra]);
            break;
        case SR_EAR:
        case SR_ESR:
        case SR_FSR:
            tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
            break;
        case 0x800:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, shr));
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
            break;
        }
    } else {
        /* mfs: move special register sr to register rd.  */
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
        case 0:
            tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
            break;
        case 1:
            msr_read(dc, cpu_R[dc->rd]);
            break;
        case SR_EAR:
            if (extended) {
                /* mfse: read the upper 32 bits of the 64-bit EAR.  */
                tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                break;
            }
            /* fall through - non-extended reads take the low half.  */
        case SR_ESR:
        case SR_FSR:
        case SR_BTR:
            tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
            break;
        case 0x800:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, shr));
            break;
        case 0x2000 ... 0x200c:
            /* PVR registers.  */
            rn = sr & 0xf;
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
            break;
        default:
            cpu_abort(cs, "unknown mfs reg %x\n", sr);
            break;
        }
    }

    /* r0 must stay zero even if it was named as a destination.  */
    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}
608
4acb54ba
EI
/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    /* tmp receives the low half for the *2_i32 high-part variants.  */
    tmp = tcg_temp_new_i32();
    switch (subcode) {
    case 0:
        LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 1:
        LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                          cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                           cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 3:
        LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
        break;
    }
    tcg_temp_free_i32(tmp);
}
658
/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;            /* u - unsigned divide.  */
    LOG_DIS("div\n");

    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
        return;
    }

    /* Helpers handle divide-by-zero and overflow exception cases.  */
    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    /* Keep r0 pinned to zero.  */
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}
680
/* Barrel shifter unit: bsll/bsra/bsrl and the bsefi/bsifi forms.  */
static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);  /* bsifi - insert.  */
        e = extract32(dc->imm, 14, 1);  /* bsefi - extract.  */
    }
    s = extract32(dc->imm, 10, 1);      /* shift left.  */
    t = extract32(dc->imm, 9, 1);       /* arithmetic (vs logical) right.  */
    imm_w = extract32(dc->imm, 6, 5);   /* field width.  */
    imm_s = extract32(dc->imm, 0, 5);   /* shift amount / field start.  */

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        /* Only the low 5 bits of the shift amount are used.  */
        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}
742
/* Decode the bitfield/misc group: src, srl/sra, sign extends, cache ops,
   clz, swapb/swaph.  */
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
    case 0x21:
        /* src. */
        t0 = tcg_temp_new_i32();

        LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
        /* Capture the old carry-copy bit before updating carry.  */
        tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
        tcg_gen_andi_i32(t0, t0, MSR_CC);
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
        }
        tcg_temp_free_i32(t0);
        break;

    case 0x1:
    case 0x41:
        /* srl. */
        LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

        /* Update carry. Note that write carry only looks at the LSB.  */
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            if (op == 0x41)
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            else
                tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
        }
        break;
    case 0x60:
        LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x61:
        LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x64:
    case 0x66:
    case 0x74:
    case 0x76:
        /* wdc. */
        LOG_DIS("wdc r%d\n", dc->ra);
        /* Cache ops are privileged; otherwise treated as nops here.  */
        trap_userspace(dc, true);
        break;
    case 0x68:
        /* wic. */
        LOG_DIS("wic r%d\n", dc->ra);
        trap_userspace(dc, true);
        break;
    case 0xe0:
        /* clz.  */
        if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
            return;
        }
        if (dc->cpu->cfg.use_pcmp_instr) {
            tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
        }
        break;
    case 0x1e0:
        /* swapb */
        LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x1e2:
        /*swaph */
        LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
        break;
    default:
        cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                  dc->pc, op, dc->rd, dc->ra, dc->rb);
        break;
    }
}
825
/* Materialize a pending static branch into env_btaken/env_btarget so
   runtime state is consistent if a fault is taken in the delay slot.  */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
    }
}
836
/* Decode the imm prefix: latch the upper 16 immediate bits for the
   following type-B insn.  */
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
844
/* Compute the effective address of a load/store into t.  'ea' selects
   extended addressing (rb:ra concatenated to a >32-bit address).  */
static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        if (ea) {
            int addr_size = dc->cpu->cfg.addr_size;

            if (addr_size == 32) {
                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
                return;
            }

            /* ra holds the high half, rb the low half.  */
            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
            if (addr_size < 64) {
                /* Mask off out of range bits.  */
                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
            }
            return;
        }

        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
    return;
}
913
/* Decode loads: lbu/lhu/lw and the reverse (r), exclusive (x) and
   extended-address (ea) variants.  */
static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    if (trap_userspace(dc, ea)) {
        return;
    }

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(0), tcg_const_i32(size - 1));
    }

    if (ex) {
        /* Record the reservation for a later swx.  */
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}
1029
4acb54ba
EI
1030static void dec_store(DisasContext *dc)
1031{
403322ea 1032 TCGv addr;
42a268c2 1033 TCGLabel *swx_skip = NULL;
b51b3d43 1034 unsigned int size;
d248e1be
EI
1035 bool rev = false, ex = false, ea = false;
1036 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
47acdd63 1037 TCGMemOp mop;
4acb54ba 1038
47acdd63
RH
1039 mop = dc->opcode & 3;
1040 size = 1 << mop;
9f8beb66 1041 if (!dc->type_b) {
d248e1be 1042 ea = extract32(dc->ir, 7, 1);
b51b3d43
EI
1043 rev = extract32(dc->ir, 9, 1);
1044 ex = extract32(dc->ir, 10, 1);
9f8beb66 1045 }
47acdd63
RH
1046 mop |= MO_TE;
1047 if (rev) {
1048 mop ^= MO_BSWAP;
1049 }
4acb54ba 1050
9ba8cd45 1051 if (trap_illegal(dc, size > 4)) {
0187688f
EI
1052 return;
1053 }
1054
d248e1be
EI
1055 trap_userspace(dc, ea);
1056
1057 LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1058 ex ? "x" : "",
1059 ea ? "ea" : "");
4acb54ba
EI
1060 t_sync_flags(dc);
1061 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1062 sync_jmpstate(dc);
0dc4af5c 1063 /* SWX needs a temp_local. */
403322ea 1064 addr = ex ? tcg_temp_local_new() : tcg_temp_new();
d248e1be
EI
1065 compute_ldst_addr(dc, ea, addr);
1066 /* Extended addressing bypasses the MMU. */
1067 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
968a40f6 1068
8cc9b43f 1069 if (ex) { /* swx */
cfeea807 1070 TCGv_i32 tval;
8cc9b43f 1071
8cc9b43f 1072 /* swx does not throw unaligned access errors, so force alignment */
403322ea 1073 tcg_gen_andi_tl(addr, addr, ~3);
8cc9b43f 1074
8cc9b43f
PC
1075 write_carryi(dc, 1);
1076 swx_skip = gen_new_label();
403322ea 1077 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);
11a76217
EI
1078
1079 /* Compare the value loaded at lwx with current contents of
1080 the reserved location.
1081 FIXME: This only works for system emulation where we can expect
1082 this compare and the following write to be atomic. For user
1083 emulation we need to add atomicity between threads. */
cfeea807 1084 tval = tcg_temp_new_i32();
0dc4af5c
EI
1085 tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
1086 MO_TEUL);
cfeea807 1087 tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
8cc9b43f 1088 write_carryi(dc, 0);
cfeea807 1089 tcg_temp_free_i32(tval);
8cc9b43f
PC
1090 }
1091
9f8beb66
EI
1092 if (rev && size != 4) {
1093 /* Endian reverse the address. t is addr. */
1094 switch (size) {
1095 case 1:
1096 {
1097 /* 00 -> 11
1098 01 -> 10
1099 10 -> 10
1100 11 -> 00 */
403322ea 1101 TCGv low = tcg_temp_new();
9f8beb66 1102
403322ea
EI
1103 tcg_gen_andi_tl(low, addr, 3);
1104 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1105 tcg_gen_andi_tl(addr, addr, ~3);
1106 tcg_gen_or_tl(addr, addr, low);
1107 tcg_temp_free(low);
9f8beb66
EI
1108 break;
1109 }
1110
1111 case 2:
1112 /* 00 -> 10
1113 10 -> 00. */
1114 /* Force addr into the temp. */
403322ea 1115 tcg_gen_xori_tl(addr, addr, 2);
9f8beb66
EI
1116 break;
1117 default:
0063ebd6 1118 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
9f8beb66
EI
1119 break;
1120 }
9f8beb66 1121 }
d248e1be 1122 tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
a12f6507 1123
968a40f6 1124 /* Verify alignment if needed. */
0063ebd6 1125 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
0a22f8cf 1126 tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
a12f6507 1127 /* FIXME: if the alignment is wrong, we should restore the value
4abf79a4 1128 * in memory. One possible way to achieve this is to probe
9f8beb66
EI
1129 * the MMU prior to the memaccess, thay way we could put
1130 * the alignment checks in between the probe and the mem
1131 * access.
a12f6507 1132 */
0dc4af5c 1133 gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
cfeea807 1134 tcg_const_i32(1), tcg_const_i32(size - 1));
968a40f6 1135 }
083dbf48 1136
8cc9b43f
PC
1137 if (ex) {
1138 gen_set_label(swx_skip);
8cc9b43f 1139 }
968a40f6 1140
403322ea 1141 tcg_temp_free(addr);
4acb54ba
EI
1142}
1143
1144static inline void eval_cc(DisasContext *dc, unsigned int cc,
9e6e1828 1145 TCGv_i32 d, TCGv_i32 a)
4acb54ba 1146{
d89b86e9
EI
1147 static const int mb_to_tcg_cc[] = {
1148 [CC_EQ] = TCG_COND_EQ,
1149 [CC_NE] = TCG_COND_NE,
1150 [CC_LT] = TCG_COND_LT,
1151 [CC_LE] = TCG_COND_LE,
1152 [CC_GE] = TCG_COND_GE,
1153 [CC_GT] = TCG_COND_GT,
1154 };
1155
4acb54ba 1156 switch (cc) {
d89b86e9
EI
1157 case CC_EQ:
1158 case CC_NE:
1159 case CC_LT:
1160 case CC_LE:
1161 case CC_GE:
1162 case CC_GT:
9e6e1828 1163 tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
d89b86e9
EI
1164 break;
1165 default:
1166 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1167 break;
4acb54ba
EI
1168 }
1169}
1170
43d318b2 1171static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
4acb54ba 1172{
e956caf2
EI
1173 TCGv_i64 tmp_btaken = tcg_temp_new_i64();
1174 TCGv_i64 tmp_zero = tcg_const_i64(0);
1175
1176 tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
1177 tcg_gen_movcond_i64(TCG_COND_NE, cpu_SR[SR_PC],
1178 tmp_btaken, tmp_zero,
1179 pc_true, pc_false);
1180
1181 tcg_temp_free_i64(tmp_btaken);
1182 tcg_temp_free_i64(tmp_zero);
4acb54ba
EI
1183}
1184
1185static void dec_bcc(DisasContext *dc)
1186{
1187 unsigned int cc;
1188 unsigned int dslot;
1189
1190 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1191 dslot = dc->ir & (1 << 25);
1192 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1193
1194 dc->delayed_branch = 1;
1195 if (dslot) {
1196 dc->delayed_branch = 2;
1197 dc->tb_flags |= D_FLAG;
cfeea807 1198 tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1199 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1200 }
1201
61204ce8
EI
1202 if (dec_alu_op_b_is_small_imm(dc)) {
1203 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1204
43d318b2 1205 tcg_gen_movi_i64(env_btarget, dc->pc + offset);
844bab60 1206 dc->jmp = JMP_DIRECT_CC;
23979dc5 1207 dc->jmp_pc = dc->pc + offset;
61204ce8 1208 } else {
23979dc5 1209 dc->jmp = JMP_INDIRECT;
43d318b2
EI
1210 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1211 tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
1212 tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
61204ce8 1213 }
9e6e1828 1214 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
4acb54ba
EI
1215}
1216
1217static void dec_br(DisasContext *dc)
1218{
9f6113c7 1219 unsigned int dslot, link, abs, mbar;
4acb54ba
EI
1220
1221 dslot = dc->ir & (1 << 20);
1222 abs = dc->ir & (1 << 19);
1223 link = dc->ir & (1 << 18);
9f6113c7
EI
1224
1225 /* Memory barrier. */
1226 mbar = (dc->ir >> 16) & 31;
1227 if (mbar == 2 && dc->imm == 4) {
5d45de97
EI
1228 /* mbar IMM & 16 decodes to sleep. */
1229 if (dc->rd & 16) {
1230 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1231 TCGv_i32 tmp_1 = tcg_const_i32(1);
1232
1233 LOG_DIS("sleep\n");
1234
1235 t_sync_flags(dc);
1236 tcg_gen_st_i32(tmp_1, cpu_env,
1237 -offsetof(MicroBlazeCPU, env)
1238 +offsetof(CPUState, halted));
0a22f8cf 1239 tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
5d45de97
EI
1240 gen_helper_raise_exception(cpu_env, tmp_hlt);
1241 tcg_temp_free_i32(tmp_hlt);
1242 tcg_temp_free_i32(tmp_1);
1243 return;
1244 }
9f6113c7
EI
1245 LOG_DIS("mbar %d\n", dc->rd);
1246 /* Break the TB. */
1247 dc->cpustate_changed = 1;
1248 return;
1249 }
1250
4acb54ba
EI
1251 LOG_DIS("br%s%s%s%s imm=%x\n",
1252 abs ? "a" : "", link ? "l" : "",
1253 dc->type_b ? "i" : "", dslot ? "d" : "",
1254 dc->imm);
1255
1256 dc->delayed_branch = 1;
1257 if (dslot) {
1258 dc->delayed_branch = 2;
1259 dc->tb_flags |= D_FLAG;
cfeea807 1260 tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1261 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1262 }
1263 if (link && dc->rd)
cfeea807 1264 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
4acb54ba
EI
1265
1266 dc->jmp = JMP_INDIRECT;
1267 if (abs) {
cfeea807 1268 tcg_gen_movi_i32(env_btaken, 1);
43d318b2 1269 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
ff21f70a
EI
1270 if (link && !dslot) {
1271 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1272 t_gen_raise_exception(dc, EXCP_BREAK);
1273 if (dc->imm == 0) {
bdfc1e88 1274 if (trap_userspace(dc, true)) {
ff21f70a
EI
1275 return;
1276 }
1277
1278 t_gen_raise_exception(dc, EXCP_DEBUG);
1279 }
1280 }
4acb54ba 1281 } else {
61204ce8
EI
1282 if (dec_alu_op_b_is_small_imm(dc)) {
1283 dc->jmp = JMP_DIRECT;
1284 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1285 } else {
cfeea807 1286 tcg_gen_movi_i32(env_btaken, 1);
43d318b2
EI
1287 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1288 tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
1289 tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
4acb54ba
EI
1290 }
1291 }
1292}
1293
1294static inline void do_rti(DisasContext *dc)
1295{
cfeea807
EI
1296 TCGv_i32 t0, t1;
1297 t0 = tcg_temp_new_i32();
1298 t1 = tcg_temp_new_i32();
0a22f8cf
EI
1299 tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1300 tcg_gen_shri_i32(t0, t1, 1);
1301 tcg_gen_ori_i32(t1, t1, MSR_IE);
cfeea807
EI
1302 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1303
1304 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1305 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1306 msr_write(dc, t1);
cfeea807
EI
1307 tcg_temp_free_i32(t1);
1308 tcg_temp_free_i32(t0);
4acb54ba
EI
1309 dc->tb_flags &= ~DRTI_FLAG;
1310}
1311
1312static inline void do_rtb(DisasContext *dc)
1313{
cfeea807
EI
1314 TCGv_i32 t0, t1;
1315 t0 = tcg_temp_new_i32();
1316 t1 = tcg_temp_new_i32();
0a22f8cf
EI
1317 tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1318 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
cfeea807
EI
1319 tcg_gen_shri_i32(t0, t1, 1);
1320 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1321
1322 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1323 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1324 msr_write(dc, t1);
cfeea807
EI
1325 tcg_temp_free_i32(t1);
1326 tcg_temp_free_i32(t0);
4acb54ba
EI
1327 dc->tb_flags &= ~DRTB_FLAG;
1328}
1329
1330static inline void do_rte(DisasContext *dc)
1331{
cfeea807
EI
1332 TCGv_i32 t0, t1;
1333 t0 = tcg_temp_new_i32();
1334 t1 = tcg_temp_new_i32();
4acb54ba 1335
0a22f8cf
EI
1336 tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
1337 tcg_gen_ori_i32(t1, t1, MSR_EE);
cfeea807
EI
1338 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1339 tcg_gen_shri_i32(t0, t1, 1);
1340 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
4acb54ba 1341
cfeea807
EI
1342 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1343 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1344 msr_write(dc, t1);
cfeea807
EI
1345 tcg_temp_free_i32(t1);
1346 tcg_temp_free_i32(t0);
4acb54ba
EI
1347 dc->tb_flags &= ~DRTE_FLAG;
1348}
1349
1350static void dec_rts(DisasContext *dc)
1351{
1352 unsigned int b_bit, i_bit, e_bit;
43d318b2 1353 TCGv_i64 tmp64;
4acb54ba
EI
1354
1355 i_bit = dc->ir & (1 << 21);
1356 b_bit = dc->ir & (1 << 22);
1357 e_bit = dc->ir & (1 << 23);
1358
bdfc1e88
EI
1359 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1360 return;
1361 }
1362
4acb54ba
EI
1363 dc->delayed_branch = 2;
1364 dc->tb_flags |= D_FLAG;
cfeea807 1365 tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1366 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1367
1368 if (i_bit) {
1369 LOG_DIS("rtid ir=%x\n", dc->ir);
1370 dc->tb_flags |= DRTI_FLAG;
1371 } else if (b_bit) {
1372 LOG_DIS("rtbd ir=%x\n", dc->ir);
1373 dc->tb_flags |= DRTB_FLAG;
1374 } else if (e_bit) {
1375 LOG_DIS("rted ir=%x\n", dc->ir);
1376 dc->tb_flags |= DRTE_FLAG;
1377 } else
1378 LOG_DIS("rts ir=%x\n", dc->ir);
1379
23979dc5 1380 dc->jmp = JMP_INDIRECT;
cfeea807 1381 tcg_gen_movi_i32(env_btaken, 1);
43d318b2
EI
1382
1383 tmp64 = tcg_temp_new_i64();
1384 tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
1385 tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
1386 tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
1387 tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
1388 tcg_temp_free_i64(tmp64);
4acb54ba
EI
1389}
1390
97694c57
EI
1391static int dec_check_fpuv2(DisasContext *dc)
1392{
be67e9ab 1393 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
0a22f8cf 1394 tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
97694c57
EI
1395 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1396 }
be67e9ab 1397 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
97694c57
EI
1398}
1399
1567a005
EI
1400static void dec_fpu(DisasContext *dc)
1401{
97694c57
EI
1402 unsigned int fpu_insn;
1403
9ba8cd45 1404 if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
1567a005
EI
1405 return;
1406 }
1407
97694c57
EI
1408 fpu_insn = (dc->ir >> 7) & 7;
1409
1410 switch (fpu_insn) {
1411 case 0:
64254eba
BS
1412 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1413 cpu_R[dc->rb]);
97694c57
EI
1414 break;
1415
1416 case 1:
64254eba
BS
1417 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1418 cpu_R[dc->rb]);
97694c57
EI
1419 break;
1420
1421 case 2:
64254eba
BS
1422 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1423 cpu_R[dc->rb]);
97694c57
EI
1424 break;
1425
1426 case 3:
64254eba
BS
1427 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1428 cpu_R[dc->rb]);
97694c57
EI
1429 break;
1430
1431 case 4:
1432 switch ((dc->ir >> 4) & 7) {
1433 case 0:
64254eba 1434 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
97694c57
EI
1435 cpu_R[dc->ra], cpu_R[dc->rb]);
1436 break;
1437 case 1:
64254eba 1438 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
97694c57
EI
1439 cpu_R[dc->ra], cpu_R[dc->rb]);
1440 break;
1441 case 2:
64254eba 1442 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
97694c57
EI
1443 cpu_R[dc->ra], cpu_R[dc->rb]);
1444 break;
1445 case 3:
64254eba 1446 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
97694c57
EI
1447 cpu_R[dc->ra], cpu_R[dc->rb]);
1448 break;
1449 case 4:
64254eba 1450 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
97694c57
EI
1451 cpu_R[dc->ra], cpu_R[dc->rb]);
1452 break;
1453 case 5:
64254eba 1454 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
97694c57
EI
1455 cpu_R[dc->ra], cpu_R[dc->rb]);
1456 break;
1457 case 6:
64254eba 1458 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
97694c57
EI
1459 cpu_R[dc->ra], cpu_R[dc->rb]);
1460 break;
1461 default:
71547a3b
BS
1462 qemu_log_mask(LOG_UNIMP,
1463 "unimplemented fcmp fpu_insn=%x pc=%x"
1464 " opc=%x\n",
1465 fpu_insn, dc->pc, dc->opcode);
97694c57
EI
1466 dc->abort_at_next_insn = 1;
1467 break;
1468 }
1469 break;
1470
1471 case 5:
1472 if (!dec_check_fpuv2(dc)) {
1473 return;
1474 }
64254eba 1475 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1476 break;
1477
1478 case 6:
1479 if (!dec_check_fpuv2(dc)) {
1480 return;
1481 }
64254eba 1482 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1483 break;
1484
1485 case 7:
1486 if (!dec_check_fpuv2(dc)) {
1487 return;
1488 }
64254eba 1489 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1490 break;
1491
1492 default:
71547a3b
BS
1493 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1494 " opc=%x\n",
1495 fpu_insn, dc->pc, dc->opcode);
97694c57
EI
1496 dc->abort_at_next_insn = 1;
1497 break;
1498 }
1567a005
EI
1499}
1500
4acb54ba
EI
1501static void dec_null(DisasContext *dc)
1502{
9ba8cd45 1503 if (trap_illegal(dc, true)) {
02b33596
EI
1504 return;
1505 }
1d512a65 1506 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
4acb54ba
EI
1507 dc->abort_at_next_insn = 1;
1508}
1509
6d76d23e
EI
1510/* Insns connected to FSL or AXI stream attached devices. */
1511static void dec_stream(DisasContext *dc)
1512{
6d76d23e
EI
1513 TCGv_i32 t_id, t_ctrl;
1514 int ctrl;
1515
1516 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1517 dc->type_b ? "" : "d", dc->imm);
1518
bdfc1e88 1519 if (trap_userspace(dc, true)) {
6d76d23e
EI
1520 return;
1521 }
1522
cfeea807 1523 t_id = tcg_temp_new_i32();
6d76d23e 1524 if (dc->type_b) {
cfeea807 1525 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
6d76d23e
EI
1526 ctrl = dc->imm >> 10;
1527 } else {
cfeea807 1528 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
6d76d23e
EI
1529 ctrl = dc->imm >> 5;
1530 }
1531
cfeea807 1532 t_ctrl = tcg_const_i32(ctrl);
6d76d23e
EI
1533
1534 if (dc->rd == 0) {
1535 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1536 } else {
1537 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1538 }
cfeea807
EI
1539 tcg_temp_free_i32(t_id);
1540 tcg_temp_free_i32(t_ctrl);
6d76d23e
EI
1541}
1542
4acb54ba
EI
1543static struct decoder_info {
1544 struct {
1545 uint32_t bits;
1546 uint32_t mask;
1547 };
1548 void (*dec)(DisasContext *dc);
1549} decinfo[] = {
1550 {DEC_ADD, dec_add},
1551 {DEC_SUB, dec_sub},
1552 {DEC_AND, dec_and},
1553 {DEC_XOR, dec_xor},
1554 {DEC_OR, dec_or},
1555 {DEC_BIT, dec_bit},
1556 {DEC_BARREL, dec_barrel},
1557 {DEC_LD, dec_load},
1558 {DEC_ST, dec_store},
1559 {DEC_IMM, dec_imm},
1560 {DEC_BR, dec_br},
1561 {DEC_BCC, dec_bcc},
1562 {DEC_RTS, dec_rts},
1567a005 1563 {DEC_FPU, dec_fpu},
4acb54ba
EI
1564 {DEC_MUL, dec_mul},
1565 {DEC_DIV, dec_div},
1566 {DEC_MSR, dec_msr},
6d76d23e 1567 {DEC_STREAM, dec_stream},
4acb54ba
EI
1568 {{0, 0}, dec_null}
1569};
1570
64254eba 1571static inline void decode(DisasContext *dc, uint32_t ir)
4acb54ba 1572{
4acb54ba
EI
1573 int i;
1574
64254eba 1575 dc->ir = ir;
4acb54ba
EI
1576 LOG_DIS("%8.8x\t", dc->ir);
1577
462c2544 1578 if (ir == 0) {
9ba8cd45 1579 trap_illegal(dc, dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK);
462c2544
EI
1580 /* Don't decode nop/zero instructions any further. */
1581 return;
4acb54ba 1582 }
462c2544 1583
4acb54ba
EI
1584 /* bit 2 seems to indicate insn type. */
1585 dc->type_b = ir & (1 << 29);
1586
1587 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1588 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1589 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1590 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1591 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1592
1593 /* Large switch for all insns. */
1594 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1595 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1596 decinfo[i].dec(dc);
1597 break;
1598 }
1599 }
1600}
1601
4acb54ba 1602/* generate intermediate code for basic block 'tb'. */
9c489ea6 1603void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
4acb54ba 1604{
9c489ea6 1605 CPUMBState *env = cs->env_ptr;
4e5e1215 1606 MicroBlazeCPU *cpu = mb_env_get_cpu(env);
4acb54ba 1607 uint32_t pc_start;
4acb54ba
EI
1608 struct DisasContext ctx;
1609 struct DisasContext *dc = &ctx;
56371527 1610 uint32_t page_start, org_flags;
cfeea807 1611 uint32_t npc;
4acb54ba
EI
1612 int num_insns;
1613 int max_insns;
1614
4acb54ba 1615 pc_start = tb->pc;
0063ebd6 1616 dc->cpu = cpu;
4acb54ba
EI
1617 dc->tb = tb;
1618 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1619
4acb54ba
EI
1620 dc->is_jmp = DISAS_NEXT;
1621 dc->jmp = 0;
1622 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
23979dc5
EI
1623 if (dc->delayed_branch) {
1624 dc->jmp = JMP_INDIRECT;
1625 }
4acb54ba 1626 dc->pc = pc_start;
ed2803da 1627 dc->singlestep_enabled = cs->singlestep_enabled;
4acb54ba
EI
1628 dc->cpustate_changed = 0;
1629 dc->abort_at_next_insn = 0;
4acb54ba 1630
a47dddd7
AF
1631 if (pc_start & 3) {
1632 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1633 }
4acb54ba 1634
56371527 1635 page_start = pc_start & TARGET_PAGE_MASK;
4acb54ba 1636 num_insns = 0;
c5a49c63 1637 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
190ce7fb 1638 if (max_insns == 0) {
4acb54ba 1639 max_insns = CF_COUNT_MASK;
190ce7fb
RH
1640 }
1641 if (max_insns > TCG_MAX_INSNS) {
1642 max_insns = TCG_MAX_INSNS;
1643 }
4acb54ba 1644
cd42d5b2 1645 gen_tb_start(tb);
4acb54ba
EI
1646 do
1647 {
667b8e29 1648 tcg_gen_insn_start(dc->pc);
959082fc 1649 num_insns++;
4acb54ba 1650
b933066a
RH
1651#if SIM_COMPAT
1652 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
0a22f8cf 1653 tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
b933066a
RH
1654 gen_helper_debug();
1655 }
1656#endif
1657
1658 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1659 t_gen_raise_exception(dc, EXCP_DEBUG);
1660 dc->is_jmp = DISAS_UPDATE;
522a0d4e
RH
1661 /* The address covered by the breakpoint must be included in
1662 [tb->pc, tb->pc + tb->size) in order to for it to be
1663 properly cleared -- thus we increment the PC here so that
1664 the logic setting tb->size below does the right thing. */
1665 dc->pc += 4;
b933066a
RH
1666 break;
1667 }
1668
4acb54ba
EI
1669 /* Pretty disas. */
1670 LOG_DIS("%8.8x:\t", dc->pc);
1671
c5a49c63 1672 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
4acb54ba 1673 gen_io_start();
959082fc 1674 }
4acb54ba
EI
1675
1676 dc->clear_imm = 1;
64254eba 1677 decode(dc, cpu_ldl_code(env, dc->pc));
4acb54ba
EI
1678 if (dc->clear_imm)
1679 dc->tb_flags &= ~IMM_FLAG;
4acb54ba 1680 dc->pc += 4;
4acb54ba
EI
1681
1682 if (dc->delayed_branch) {
1683 dc->delayed_branch--;
1684 if (!dc->delayed_branch) {
1685 if (dc->tb_flags & DRTI_FLAG)
1686 do_rti(dc);
1687 if (dc->tb_flags & DRTB_FLAG)
1688 do_rtb(dc);
1689 if (dc->tb_flags & DRTE_FLAG)
1690 do_rte(dc);
1691 /* Clear the delay slot flag. */
1692 dc->tb_flags &= ~D_FLAG;
1693 /* If it is a direct jump, try direct chaining. */
23979dc5 1694 if (dc->jmp == JMP_INDIRECT) {
0a22f8cf 1695 eval_cond_jmp(dc, env_btarget, tcg_const_i64(dc->pc));
4acb54ba 1696 dc->is_jmp = DISAS_JUMP;
23979dc5 1697 } else if (dc->jmp == JMP_DIRECT) {
844bab60
EI
1698 t_sync_flags(dc);
1699 gen_goto_tb(dc, 0, dc->jmp_pc);
1700 dc->is_jmp = DISAS_TB_JUMP;
1701 } else if (dc->jmp == JMP_DIRECT_CC) {
42a268c2 1702 TCGLabel *l1 = gen_new_label();
23979dc5 1703 t_sync_flags(dc);
23979dc5 1704 /* Conditional jmp. */
cfeea807 1705 tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
23979dc5
EI
1706 gen_goto_tb(dc, 1, dc->pc);
1707 gen_set_label(l1);
1708 gen_goto_tb(dc, 0, dc->jmp_pc);
1709
1710 dc->is_jmp = DISAS_TB_JUMP;
4acb54ba
EI
1711 }
1712 break;
1713 }
1714 }
ed2803da 1715 if (cs->singlestep_enabled) {
4acb54ba 1716 break;
ed2803da 1717 }
4acb54ba 1718 } while (!dc->is_jmp && !dc->cpustate_changed
fe700adb
RH
1719 && !tcg_op_buf_full()
1720 && !singlestep
56371527 1721 && (dc->pc - page_start < TARGET_PAGE_SIZE)
fe700adb 1722 && num_insns < max_insns);
4acb54ba
EI
1723
1724 npc = dc->pc;
844bab60 1725 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
4acb54ba
EI
1726 if (dc->tb_flags & D_FLAG) {
1727 dc->is_jmp = DISAS_UPDATE;
0a22f8cf 1728 tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
4acb54ba
EI
1729 sync_jmpstate(dc);
1730 } else
1731 npc = dc->jmp_pc;
1732 }
1733
c5a49c63 1734 if (tb_cflags(tb) & CF_LAST_IO)
4acb54ba
EI
1735 gen_io_end();
1736 /* Force an update if the per-tb cpu state has changed. */
1737 if (dc->is_jmp == DISAS_NEXT
1738 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1739 dc->is_jmp = DISAS_UPDATE;
0a22f8cf 1740 tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
4acb54ba
EI
1741 }
1742 t_sync_flags(dc);
1743
ed2803da 1744 if (unlikely(cs->singlestep_enabled)) {
6c5f738d
EI
1745 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1746
1747 if (dc->is_jmp != DISAS_JUMP) {
0a22f8cf 1748 tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
6c5f738d 1749 }
64254eba 1750 gen_helper_raise_exception(cpu_env, tmp);
6c5f738d 1751 tcg_temp_free_i32(tmp);
4acb54ba
EI
1752 } else {
1753 switch(dc->is_jmp) {
1754 case DISAS_NEXT:
1755 gen_goto_tb(dc, 1, npc);
1756 break;
1757 default:
1758 case DISAS_JUMP:
1759 case DISAS_UPDATE:
1760 /* indicate that the hash table must be used
1761 to find the next TB */
07ea28b4 1762 tcg_gen_exit_tb(NULL, 0);
4acb54ba
EI
1763 break;
1764 case DISAS_TB_JUMP:
1765 /* nothing more to generate */
1766 break;
1767 }
1768 }
806f352d 1769 gen_tb_end(tb, num_insns);
0a7df5da 1770
4e5e1215
RH
1771 tb->size = dc->pc - pc_start;
1772 tb->icount = num_insns;
4acb54ba
EI
1773
1774#ifdef DEBUG_DISAS
1775#if !SIM_COMPAT
4910e6e4
RH
1776 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1777 && qemu_log_in_addr_range(pc_start)) {
1ee73216 1778 qemu_log_lock();
f01a5e7e 1779 qemu_log("--------------\n");
1d48474d 1780 log_target_disas(cs, pc_start, dc->pc - pc_start);
1ee73216 1781 qemu_log_unlock();
4acb54ba
EI
1782 }
1783#endif
1784#endif
1785 assert(!dc->abort_at_next_insn);
1786}
1787
878096ee
AF
1788void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1789 int flags)
4acb54ba 1790{
878096ee
AF
1791 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1792 CPUMBState *env = &cpu->env;
4acb54ba
EI
1793 int i;
1794
1795 if (!env || !f)
1796 return;
1797
0a22f8cf 1798 cpu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
4acb54ba 1799 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
0a22f8cf
EI
1800 cpu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
1801 "debug=%x imm=%x iflags=%x fsr=%" PRIx64 "\n",
4c24aa0a 1802 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
97694c57 1803 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
43d318b2
EI
1804 cpu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
1805 "eip=%d ie=%d\n",
4acb54ba
EI
1806 env->btaken, env->btarget,
1807 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
17c52a43 1808 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
0a22f8cf
EI
1809 (bool)(env->sregs[SR_MSR] & MSR_EIP),
1810 (bool)(env->sregs[SR_MSR] & MSR_IE));
17c52a43 1811
4acb54ba
EI
1812 for (i = 0; i < 32; i++) {
1813 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1814 if ((i + 1) % 4 == 0)
1815 cpu_fprintf(f, "\n");
1816 }
1817 cpu_fprintf(f, "\n\n");
1818}
1819
cd0c24f9
AF
1820void mb_tcg_init(void)
1821{
1822 int i;
4acb54ba 1823
cfeea807 1824 env_debug = tcg_global_mem_new_i32(cpu_env,
68cee38a 1825 offsetof(CPUMBState, debug),
4acb54ba 1826 "debug0");
cfeea807 1827 env_iflags = tcg_global_mem_new_i32(cpu_env,
68cee38a 1828 offsetof(CPUMBState, iflags),
4acb54ba 1829 "iflags");
cfeea807 1830 env_imm = tcg_global_mem_new_i32(cpu_env,
68cee38a 1831 offsetof(CPUMBState, imm),
4acb54ba 1832 "imm");
43d318b2 1833 env_btarget = tcg_global_mem_new_i64(cpu_env,
68cee38a 1834 offsetof(CPUMBState, btarget),
4acb54ba 1835 "btarget");
cfeea807 1836 env_btaken = tcg_global_mem_new_i32(cpu_env,
68cee38a 1837 offsetof(CPUMBState, btaken),
4acb54ba 1838 "btaken");
403322ea 1839 env_res_addr = tcg_global_mem_new(cpu_env,
4a536270
EI
1840 offsetof(CPUMBState, res_addr),
1841 "res_addr");
cfeea807 1842 env_res_val = tcg_global_mem_new_i32(cpu_env,
11a76217
EI
1843 offsetof(CPUMBState, res_val),
1844 "res_val");
4acb54ba 1845 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
cfeea807 1846 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
68cee38a 1847 offsetof(CPUMBState, regs[i]),
4acb54ba
EI
1848 regnames[i]);
1849 }
1850 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
0a22f8cf 1851 cpu_SR[i] = tcg_global_mem_new_i64(cpu_env,
68cee38a 1852 offsetof(CPUMBState, sregs[i]),
4acb54ba
EI
1853 special_regnames[i]);
1854 }
4acb54ba
EI
1855}
1856
bad729e2
RH
1857void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1858 target_ulong *data)
4acb54ba 1859{
bad729e2 1860 env->sregs[SR_PC] = data[0];
4acb54ba 1861}