/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
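
/*
 * Added illustration (not in the original source): EXTRACT_FIELD selects an
 * inclusive bit range.  decode() below uses it for the fixed fields of an
 * instruction word, e.g.:
 *
 *     EXTRACT_FIELD(ir, 26, 31)  ==  (ir >> 26) & 0x3f     -- 6-bit opcode
 *     EXTRACT_FIELD(ir, 0, 15)   ==   ir        & 0xffff   -- 16-bit imm
 */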

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i32 env_btarget;
static TCGv_i32 env_iflags;
static TCGv env_res_addr;
static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time. */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder. */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags. */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime. */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
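
/*
 * Added note: direct TB chaining is only attempted while the destination
 * stays on the same guest page as the current TB, so unmapping that page
 * invalidates both ends.  Assuming the usual 4 KiB target pages, a branch
 * from 0x10000100 to 0x10000f00 can use goto_tb, while one to 0x10001000
 * falls back to tcg_gen_exit_tb(0) in gen_goto_tb() below.
 */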

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_shri_i32(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_shli_i32(t0, v, 31);
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_andi_i32(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                     ~(MSR_C | MSR_CC));
    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free_i32(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}
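
/*
 * Added example: MSR_C (the carry) and MSR_CC (its copy in bit 31, which is
 * what read_carry() shifts out) are always updated together, so
 * write_carryi(dc, 1) behaves roughly like:
 *
 *     MSR = (MSR & ~(MSR_C | MSR_CC)) | (MSR_C | MSR_CC);
 *
 * while write_carryi(dc, 0) just clears both bits.
 */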

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment. */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix? */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
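
/*
 * Added note on the imm prefix: a type B instruction carries a sign-extended
 * 16-bit immediate.  A preceding "imm" instruction (see dec_imm()) latches
 * the upper half in env_imm and sets IMM_FLAG, so dec_alu_op_b() below ORs
 * the low half in instead of sign-extending.  Illustrative guest code:
 *
 *     imm   0x1234
 *     addik r5, r6, 0x5678      -> r5 = r6 + 0x12345678
 */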

static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first. */
    if (k) {
        /* k - keep carry, no need to update MSR. */
        /* If rd == r0, it's a nop. */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result. */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero. So we need to update MSR. */
    /* Extract carry. */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first. */
    if (k) {
        /* k - keep carry, no need to update MSR. */
        /* If rd == r0, it's a nop. */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result. */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero. So we need to update MSR. */
    /* Extract carry. And complement a into na. */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1. */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
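
/*
 * Added note: the subtract above uses the two's-complement identity
 * d = b - a = b + ~a + 1, which is why the default carry-in is 1 and rsubc
 * substitutes the current MSR carry for it.  Worked example: b = 5, a = 3
 * gives 5 + 0xfffffffc + 1 = 2 (mod 2^32) with the carry out set.
 */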

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
    case 0:
        /* pcmpbf. */
        LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd)
            gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    case 3:
        LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    default:
        cpu_abort(CPU(dc->cpu),
                  "unsupported pattern insn opcode=%x\n", dc->opcode);
        break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
438
cfeea807 439static inline void msr_read(DisasContext *dc, TCGv_i32 d)
4acb54ba 440{
cfeea807 441 tcg_gen_mov_i32(d, cpu_SR[SR_MSR]);
4acb54ba
EI
442}
443
cfeea807 444static inline void msr_write(DisasContext *dc, TCGv_i32 v)
4acb54ba 445{
cfeea807 446 TCGv_i32 t;
97b833c5 447
cfeea807 448 t = tcg_temp_new_i32();
4acb54ba 449 dc->cpustate_changed = 1;
97b833c5 450 /* PVR bit is not writable. */
cfeea807
EI
451 tcg_gen_andi_i32(t, v, ~MSR_PVR);
452 tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
453 tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
97b833c5 454 tcg_temp_free(t);
4acb54ba
EI
455}
456
457static void dec_msr(DisasContext *dc)
458{
0063ebd6 459 CPUState *cs = CPU(dc->cpu);
cfeea807 460 TCGv_i32 t0, t1;
2023e9a3
EI
461 unsigned int sr, rn;
462 bool to, clrset;
4acb54ba 463
2023e9a3
EI
464 sr = extract32(dc->imm, 0, 14);
465 to = extract32(dc->imm, 14, 1);
466 clrset = extract32(dc->imm, 15, 1) == 0;
4acb54ba 467 dc->type_b = 1;
2023e9a3 468 if (to) {
4acb54ba 469 dc->cpustate_changed = 1;
2023e9a3 470 }
4acb54ba
EI
471
472 /* msrclr and msrset. */
2023e9a3
EI
473 if (clrset) {
474 bool clr = extract32(dc->ir, 16, 1);
4acb54ba
EI
475
476 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
477 dc->rd, dc->imm);
1567a005 478
56837509 479 if (!dc->cpu->cfg.use_msr_instr) {
1567a005
EI
480 /* nop??? */
481 return;
482 }
483
bdfc1e88 484 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
1567a005
EI
485 return;
486 }
487
4acb54ba
EI
488 if (dc->rd)
489 msr_read(dc, cpu_R[dc->rd]);
490
cfeea807
EI
491 t0 = tcg_temp_new_i32();
492 t1 = tcg_temp_new_i32();
4acb54ba 493 msr_read(dc, t0);
cfeea807 494 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
4acb54ba
EI
495
496 if (clr) {
cfeea807
EI
497 tcg_gen_not_i32(t1, t1);
498 tcg_gen_and_i32(t0, t0, t1);
4acb54ba 499 } else
cfeea807 500 tcg_gen_or_i32(t0, t0, t1);
4acb54ba 501 msr_write(dc, t0);
cfeea807
EI
502 tcg_temp_free_i32(t0);
503 tcg_temp_free_i32(t1);
504 tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
4acb54ba
EI
505 dc->is_jmp = DISAS_UPDATE;
506 return;
507 }
508
bdfc1e88
EI
509 if (trap_userspace(dc, to)) {
510 return;
1567a005
EI
511 }
512
4acb54ba
EI
513#if !defined(CONFIG_USER_ONLY)
514 /* Catch read/writes to the mmu block. */
515 if ((sr & ~0xff) == 0x1000) {
516 sr &= 7;
517 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
518 if (to)
cfeea807 519 gen_helper_mmu_write(cpu_env, tcg_const_i32(sr), cpu_R[dc->ra]);
4acb54ba 520 else
cfeea807 521 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_i32(sr));
4acb54ba
EI
522 return;
523 }
524#endif
525
526 if (to) {
527 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
528 switch (sr) {
529 case 0:
530 break;
531 case 1:
532 msr_write(dc, cpu_R[dc->ra]);
533 break;
534 case 0x3:
cfeea807 535 tcg_gen_mov_i32(cpu_SR[SR_EAR], cpu_R[dc->ra]);
4acb54ba
EI
536 break;
537 case 0x5:
cfeea807 538 tcg_gen_mov_i32(cpu_SR[SR_ESR], cpu_R[dc->ra]);
4acb54ba
EI
539 break;
540 case 0x7:
cfeea807 541 tcg_gen_andi_i32(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
4acb54ba 542 break;
5818dee5 543 case 0x800:
cfeea807
EI
544 tcg_gen_st_i32(cpu_R[dc->ra],
545 cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
546 break;
547 case 0x802:
cfeea807
EI
548 tcg_gen_st_i32(cpu_R[dc->ra],
549 cpu_env, offsetof(CPUMBState, shr));
5818dee5 550 break;
4acb54ba 551 default:
0063ebd6 552 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
4acb54ba
EI
553 break;
554 }
555 } else {
556 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
557
558 switch (sr) {
559 case 0:
cfeea807 560 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
4acb54ba
EI
561 break;
562 case 1:
563 msr_read(dc, cpu_R[dc->rd]);
564 break;
565 case 0x3:
cfeea807 566 tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_EAR]);
4acb54ba
EI
567 break;
568 case 0x5:
cfeea807 569 tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_ESR]);
4acb54ba
EI
570 break;
571 case 0x7:
cfeea807 572 tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_FSR]);
4acb54ba
EI
573 break;
574 case 0xb:
cfeea807 575 tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_BTR]);
4acb54ba 576 break;
5818dee5 577 case 0x800:
cfeea807
EI
578 tcg_gen_ld_i32(cpu_R[dc->rd],
579 cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
580 break;
581 case 0x802:
cfeea807
EI
582 tcg_gen_ld_i32(cpu_R[dc->rd],
583 cpu_env, offsetof(CPUMBState, shr));
5818dee5 584 break;
4acb54ba
EI
585 case 0x2000:
586 case 0x2001:
587 case 0x2002:
588 case 0x2003:
589 case 0x2004:
590 case 0x2005:
591 case 0x2006:
592 case 0x2007:
593 case 0x2008:
594 case 0x2009:
595 case 0x200a:
596 case 0x200b:
597 case 0x200c:
598 rn = sr & 0xf;
cfeea807 599 tcg_gen_ld_i32(cpu_R[dc->rd],
68cee38a 600 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
4acb54ba
EI
601 break;
602 default:
a47dddd7 603 cpu_abort(cs, "unknown mfs reg %x\n", sr);
4acb54ba
EI
604 break;
605 }
606 }
ee7dbcf8
EI
607
608 if (dc->rd == 0) {
cfeea807 609 tcg_gen_movi_i32(cpu_R[0], 0);
ee7dbcf8 610 }
4acb54ba
EI
611}
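
/*
 * Added note (assuming MSR_C == 0x4 as defined in cpu.h): user mode may only
 * pass a mask of 0 or 4 to msrclr/msrset, i.e. it can read the MSR and
 * toggle the carry bit but nothing else; any other mask is rejected by
 * trap_userspace() above.  E.g. "msrclr r5, 0x4" copies the old MSR to r5
 * and clears the carry flag.
 */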
612
4acb54ba
EI
613/* Multiplier unit. */
614static void dec_mul(DisasContext *dc)
615{
cfeea807 616 TCGv_i32 tmp;
4acb54ba
EI
617 unsigned int subcode;
618
9ba8cd45 619 if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
1567a005
EI
620 return;
621 }
622
4acb54ba 623 subcode = dc->imm & 3;
4acb54ba
EI
624
625 if (dc->type_b) {
626 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
cfeea807 627 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
16ece88d 628 return;
4acb54ba
EI
629 }
630
1567a005 631 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
9b964318 632 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
1567a005
EI
633 /* nop??? */
634 }
635
cfeea807 636 tmp = tcg_temp_new_i32();
4acb54ba
EI
637 switch (subcode) {
638 case 0:
639 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
cfeea807 640 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
641 break;
642 case 1:
643 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
cfeea807
EI
644 tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
645 cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
646 break;
647 case 2:
648 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
cfeea807
EI
649 tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
650 cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
651 break;
652 case 3:
653 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
cfeea807 654 tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
655 break;
656 default:
0063ebd6 657 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
4acb54ba
EI
658 break;
659 }
cfeea807 660 tcg_temp_free_i32(tmp);
4acb54ba
EI
661}
662
663/* Div unit. */
664static void dec_div(DisasContext *dc)
665{
666 unsigned int u;
667
668 u = dc->imm & 2;
669 LOG_DIS("div\n");
670
9ba8cd45
EI
671 if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
672 return;
1567a005
EI
673 }
674
4acb54ba 675 if (u)
64254eba
BS
676 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
677 cpu_R[dc->ra]);
4acb54ba 678 else
64254eba
BS
679 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
680 cpu_R[dc->ra]);
4acb54ba 681 if (!dc->rd)
cfeea807 682 tcg_gen_movi_i32(cpu_R[dc->rd], 0);
4acb54ba
EI
683}
684
685static void dec_barrel(DisasContext *dc)
686{
cfeea807 687 TCGv_i32 t0;
faa48d74 688 unsigned int imm_w, imm_s;
d09b2585 689 bool s, t, e = false, i = false;
4acb54ba 690
9ba8cd45 691 if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
1567a005
EI
692 return;
693 }
694
faa48d74
EI
695 if (dc->type_b) {
696 /* Insert and extract are only available in immediate mode. */
d09b2585 697 i = extract32(dc->imm, 15, 1);
faa48d74
EI
698 e = extract32(dc->imm, 14, 1);
699 }
e3e84983
EI
700 s = extract32(dc->imm, 10, 1);
701 t = extract32(dc->imm, 9, 1);
faa48d74
EI
702 imm_w = extract32(dc->imm, 6, 5);
703 imm_s = extract32(dc->imm, 0, 5);
4acb54ba 704
faa48d74
EI
705 LOG_DIS("bs%s%s%s r%d r%d r%d\n",
706 e ? "e" : "",
4acb54ba
EI
707 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
708
faa48d74
EI
709 if (e) {
710 if (imm_w + imm_s > 32 || imm_w == 0) {
711 /* These inputs have an undefined behavior. */
712 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
713 imm_w, imm_s);
714 } else {
715 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
716 }
d09b2585
EI
717 } else if (i) {
718 int width = imm_w - imm_s + 1;
719
720 if (imm_w < imm_s) {
721 /* These inputs have an undefined behavior. */
722 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
723 imm_w, imm_s);
724 } else {
725 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
726 imm_s, width);
727 }
faa48d74 728 } else {
cfeea807 729 t0 = tcg_temp_new_i32();
4acb54ba 730
cfeea807
EI
731 tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
732 tcg_gen_andi_i32(t0, t0, 31);
4acb54ba 733
faa48d74 734 if (s) {
cfeea807 735 tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
2acf6d53 736 } else {
faa48d74 737 if (t) {
cfeea807 738 tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
faa48d74 739 } else {
cfeea807 740 tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
faa48d74 741 }
2acf6d53 742 }
cfeea807 743 tcg_temp_free_i32(t0);
4acb54ba
EI
744 }
745}
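
/*
 * Added example: for bsefi (e set) the immediates select a bit-field start
 * and width, mapping directly onto tcg_gen_extract_i32(); with imm_s = 8 and
 * imm_w = 4, rD gets (rA >> 8) & 0xf.  For bsifi (i set) the field runs from
 * bit imm_s to bit imm_w inclusive, hence the width imm_w - imm_s + 1 passed
 * to tcg_gen_deposit_i32().
 */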
746
747static void dec_bit(DisasContext *dc)
748{
0063ebd6 749 CPUState *cs = CPU(dc->cpu);
cfeea807 750 TCGv_i32 t0;
4acb54ba
EI
751 unsigned int op;
752
ace2e4da 753 op = dc->ir & ((1 << 9) - 1);
4acb54ba
EI
754 switch (op) {
755 case 0x21:
756 /* src. */
cfeea807 757 t0 = tcg_temp_new_i32();
4acb54ba
EI
758
759 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
cfeea807 760 tcg_gen_andi_i32(t0, cpu_SR[SR_MSR], MSR_CC);
09b9f113 761 write_carry(dc, cpu_R[dc->ra]);
4acb54ba 762 if (dc->rd) {
cfeea807
EI
763 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
764 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
4acb54ba 765 }
cfeea807 766 tcg_temp_free_i32(t0);
4acb54ba
EI
767 break;
768
769 case 0x1:
770 case 0x41:
771 /* srl. */
4acb54ba
EI
772 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
773
bb3cb951
EI
774 /* Update carry. Note that write carry only looks at the LSB. */
775 write_carry(dc, cpu_R[dc->ra]);
4acb54ba
EI
776 if (dc->rd) {
777 if (op == 0x41)
cfeea807 778 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
4acb54ba 779 else
cfeea807 780 tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
4acb54ba
EI
781 }
782 break;
783 case 0x60:
784 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
785 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
786 break;
787 case 0x61:
788 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
789 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
790 break;
791 case 0x64:
f062a3c7
EI
792 case 0x66:
793 case 0x74:
794 case 0x76:
4acb54ba
EI
795 /* wdc. */
796 LOG_DIS("wdc r%d\n", dc->ra);
bdfc1e88 797 trap_userspace(dc, true);
4acb54ba
EI
798 break;
799 case 0x68:
800 /* wic. */
801 LOG_DIS("wic r%d\n", dc->ra);
bdfc1e88 802 trap_userspace(dc, true);
4acb54ba 803 break;
48b5e96f 804 case 0xe0:
9ba8cd45
EI
805 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
806 return;
48b5e96f 807 }
8fc5239e 808 if (dc->cpu->cfg.use_pcmp_instr) {
5318420c 809 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
48b5e96f
EI
810 }
811 break;
ace2e4da
PC
812 case 0x1e0:
813 /* swapb */
814 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
815 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
816 break;
b8c6a5d9 817 case 0x1e2:
ace2e4da
PC
818 /*swaph */
819 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
820 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
821 break;
4acb54ba 822 default:
a47dddd7
AF
823 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
824 dc->pc, op, dc->rd, dc->ra, dc->rb);
4acb54ba
EI
825 break;
826 }
827}
828
829static inline void sync_jmpstate(DisasContext *dc)
830{
844bab60
EI
831 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
832 if (dc->jmp == JMP_DIRECT) {
cfeea807 833 tcg_gen_movi_i32(env_btaken, 1);
844bab60 834 }
23979dc5 835 dc->jmp = JMP_INDIRECT;
cfeea807 836 tcg_gen_movi_i32(env_btarget, dc->jmp_pc);
4acb54ba
EI
837 }
838}
839
840static void dec_imm(DisasContext *dc)
841{
842 LOG_DIS("imm %x\n", dc->imm << 16);
cfeea807 843 tcg_gen_movi_i32(env_imm, (dc->imm << 16));
4acb54ba
EI
844 dc->tb_flags |= IMM_FLAG;
845 dc->clear_imm = 0;
846}
847
403322ea 848static inline void compute_ldst_addr(DisasContext *dc, TCGv t)
4acb54ba 849{
0e9033c8
EI
850 bool extimm = dc->tb_flags & IMM_FLAG;
851 /* Should be set to true if r1 is used by loadstores. */
852 bool stackprot = false;
403322ea 853 TCGv_i32 t32;
5818dee5
EI
854
855 /* All load/stores use ra. */
9aaaa181 856 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
0e9033c8 857 stackprot = true;
5818dee5 858 }
4acb54ba 859
9ef55357 860 /* Treat the common cases first. */
4acb54ba 861 if (!dc->type_b) {
0dc4af5c 862 /* If any of the regs is r0, set t to the value of the other reg. */
4b5ef0b5 863 if (dc->ra == 0) {
403322ea 864 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
0dc4af5c 865 return;
4b5ef0b5 866 } else if (dc->rb == 0) {
403322ea 867 tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
0dc4af5c 868 return;
4b5ef0b5
EI
869 }
870
9aaaa181 871 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
0e9033c8 872 stackprot = true;
5818dee5
EI
873 }
874
403322ea
EI
875 t32 = tcg_temp_new_i32();
876 tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
877 tcg_gen_extu_i32_tl(t, t32);
878 tcg_temp_free_i32(t32);
5818dee5
EI
879
880 if (stackprot) {
0a87e691 881 gen_helper_stackprot(cpu_env, t);
5818dee5 882 }
0dc4af5c 883 return;
4acb54ba
EI
884 }
885 /* Immediate. */
403322ea 886 t32 = tcg_temp_new_i32();
4acb54ba
EI
887 if (!extimm) {
888 if (dc->imm == 0) {
403322ea
EI
889 tcg_gen_mov_i32(t32, cpu_R[dc->ra]);
890 } else {
891 tcg_gen_movi_i32(t32, (int32_t)((int16_t)dc->imm));
892 tcg_gen_add_i32(t32, cpu_R[dc->ra], t32);
4acb54ba 893 }
4acb54ba 894 } else {
403322ea 895 tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba 896 }
403322ea
EI
897 tcg_gen_extu_i32_tl(t, t32);
898 tcg_temp_free_i32(t32);
4acb54ba 899
5818dee5 900 if (stackprot) {
0a87e691 901 gen_helper_stackprot(cpu_env, t);
5818dee5 902 }
0dc4af5c 903 return;
4acb54ba
EI
904}
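
/*
 * Added note: when r1 (the stack pointer) is the base register of a load or
 * store and the core is configured with stack protection, the computed
 * address is passed to gen_helper_stackprot(), which is expected to raise an
 * exception when the access falls outside the window described by the
 * slr/shr registers written through dec_msr() above.
 */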
905
906static void dec_load(DisasContext *dc)
907{
403322ea
EI
908 TCGv_i32 v;
909 TCGv addr;
8534063a
EI
910 unsigned int size;
911 bool rev = false, ex = false;
47acdd63 912 TCGMemOp mop;
4acb54ba 913
47acdd63
RH
914 mop = dc->opcode & 3;
915 size = 1 << mop;
9f8beb66 916 if (!dc->type_b) {
8534063a
EI
917 rev = extract32(dc->ir, 9, 1);
918 ex = extract32(dc->ir, 10, 1);
9f8beb66 919 }
47acdd63
RH
920 mop |= MO_TE;
921 if (rev) {
922 mop ^= MO_BSWAP;
923 }
9f8beb66 924
9ba8cd45 925 if (trap_illegal(dc, size > 4)) {
0187688f
EI
926 return;
927 }
4acb54ba 928
8cc9b43f
PC
929 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
930 ex ? "x" : "");
9f8beb66 931
4acb54ba 932 t_sync_flags(dc);
403322ea 933 addr = tcg_temp_new();
0a87e691 934 compute_ldst_addr(dc, addr);
4acb54ba 935
9f8beb66
EI
936 /*
937 * When doing reverse accesses we need to do two things.
938 *
4ff9786c 939 * 1. Reverse the address wrt endianness.
9f8beb66
EI
940 * 2. Byteswap the data lanes on the way back into the CPU core.
941 */
942 if (rev && size != 4) {
943 /* Endian reverse the address. t is addr. */
944 switch (size) {
945 case 1:
946 {
947 /* 00 -> 11
948 01 -> 10
949 10 -> 10
950 11 -> 00 */
403322ea 951 TCGv low = tcg_temp_new();
9f8beb66 952
403322ea
EI
953 tcg_gen_andi_tl(low, addr, 3);
954 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
955 tcg_gen_andi_tl(addr, addr, ~3);
956 tcg_gen_or_tl(addr, addr, low);
957 tcg_temp_free(low);
9f8beb66
EI
958 break;
959 }
960
961 case 2:
962 /* 00 -> 10
963 10 -> 00. */
403322ea 964 tcg_gen_xori_tl(addr, addr, 2);
9f8beb66
EI
965 break;
966 default:
0063ebd6 967 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
9f8beb66
EI
968 break;
969 }
970 }
971
8cc9b43f
PC
972 /* lwx does not throw unaligned access errors, so force alignment */
973 if (ex) {
403322ea 974 tcg_gen_andi_tl(addr, addr, ~3);
8cc9b43f
PC
975 }
976
4acb54ba
EI
977 /* If we get a fault on a dslot, the jmpstate better be in sync. */
978 sync_jmpstate(dc);
968a40f6
EI
979
980 /* Verify alignment if needed. */
47acdd63
RH
981 /*
982 * Microblaze gives MMU faults priority over faults due to
983 * unaligned addresses. That's why we speculatively do the load
984 * into v. If the load succeeds, we verify alignment of the
985 * address and if that succeeds we write into the destination reg.
986 */
cfeea807 987 v = tcg_temp_new_i32();
0dc4af5c 988 tcg_gen_qemu_ld_i32(v, addr, cpu_mmu_index(&dc->cpu->env, false), mop);
a12f6507 989
0063ebd6 990 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
cfeea807 991 tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
0dc4af5c 992 gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
cfeea807 993 tcg_const_i32(0), tcg_const_i32(size - 1));
4acb54ba
EI
994 }
995
47acdd63 996 if (ex) {
403322ea 997 tcg_gen_mov_tl(env_res_addr, addr);
cfeea807 998 tcg_gen_mov_i32(env_res_val, v);
47acdd63
RH
999 }
1000 if (dc->rd) {
cfeea807 1001 tcg_gen_mov_i32(cpu_R[dc->rd], v);
47acdd63 1002 }
cfeea807 1003 tcg_temp_free_i32(v);
47acdd63 1004
8cc9b43f 1005 if (ex) { /* lwx */
b6af0975 1006 /* no support for AXI exclusive so always clear C */
8cc9b43f 1007 write_carryi(dc, 0);
8cc9b43f
PC
1008 }
1009
403322ea 1010 tcg_temp_free(addr);
4acb54ba
EI
1011}
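
/*
 * Added example for the reversed accesses handled above: a 16-bit reversed
 * load flips bit 1 of the address (00 -> 10, 10 -> 00), so an access at
 * offset 0x2 really reads offset 0x0, and the data lanes are byteswapped by
 * the MO_BSWAP flag folded into the memop.  Word-sized reversed accesses
 * only need the byteswap, never an address fixup.
 */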
1012
4acb54ba
EI
1013static void dec_store(DisasContext *dc)
1014{
403322ea 1015 TCGv addr;
42a268c2 1016 TCGLabel *swx_skip = NULL;
b51b3d43
EI
1017 unsigned int size;
1018 bool rev = false, ex = false;
47acdd63 1019 TCGMemOp mop;
4acb54ba 1020
47acdd63
RH
1021 mop = dc->opcode & 3;
1022 size = 1 << mop;
9f8beb66 1023 if (!dc->type_b) {
b51b3d43
EI
1024 rev = extract32(dc->ir, 9, 1);
1025 ex = extract32(dc->ir, 10, 1);
9f8beb66 1026 }
47acdd63
RH
1027 mop |= MO_TE;
1028 if (rev) {
1029 mop ^= MO_BSWAP;
1030 }
4acb54ba 1031
9ba8cd45 1032 if (trap_illegal(dc, size > 4)) {
0187688f
EI
1033 return;
1034 }
1035
8cc9b43f
PC
1036 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1037 ex ? "x" : "");
4acb54ba
EI
1038 t_sync_flags(dc);
1039 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1040 sync_jmpstate(dc);
0dc4af5c 1041 /* SWX needs a temp_local. */
403322ea 1042 addr = ex ? tcg_temp_local_new() : tcg_temp_new();
0a87e691 1043 compute_ldst_addr(dc, addr);
968a40f6 1044
8cc9b43f 1045 if (ex) { /* swx */
cfeea807 1046 TCGv_i32 tval;
8cc9b43f 1047
8cc9b43f 1048 /* swx does not throw unaligned access errors, so force alignment */
403322ea 1049 tcg_gen_andi_tl(addr, addr, ~3);
8cc9b43f 1050
8cc9b43f
PC
1051 write_carryi(dc, 1);
1052 swx_skip = gen_new_label();
403322ea 1053 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);
11a76217
EI
1054
1055 /* Compare the value loaded at lwx with current contents of
1056 the reserved location.
1057 FIXME: This only works for system emulation where we can expect
1058 this compare and the following write to be atomic. For user
1059 emulation we need to add atomicity between threads. */
cfeea807 1060 tval = tcg_temp_new_i32();
0dc4af5c
EI
1061 tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
1062 MO_TEUL);
cfeea807 1063 tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
8cc9b43f 1064 write_carryi(dc, 0);
cfeea807 1065 tcg_temp_free_i32(tval);
8cc9b43f
PC
1066 }
1067
9f8beb66
EI
1068 if (rev && size != 4) {
1069 /* Endian reverse the address. t is addr. */
1070 switch (size) {
1071 case 1:
1072 {
1073 /* 00 -> 11
1074 01 -> 10
1075 10 -> 10
1076 11 -> 00 */
403322ea 1077 TCGv low = tcg_temp_new();
9f8beb66 1078
403322ea
EI
1079 tcg_gen_andi_tl(low, addr, 3);
1080 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1081 tcg_gen_andi_tl(addr, addr, ~3);
1082 tcg_gen_or_tl(addr, addr, low);
1083 tcg_temp_free(low);
9f8beb66
EI
1084 break;
1085 }
1086
1087 case 2:
1088 /* 00 -> 10
1089 10 -> 00. */
1090 /* Force addr into the temp. */
403322ea 1091 tcg_gen_xori_tl(addr, addr, 2);
9f8beb66
EI
1092 break;
1093 default:
0063ebd6 1094 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
9f8beb66
EI
1095 break;
1096 }
9f8beb66 1097 }
0dc4af5c 1098 tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr,
cfeea807 1099 cpu_mmu_index(&dc->cpu->env, false), mop);
a12f6507 1100
968a40f6 1101 /* Verify alignment if needed. */
0063ebd6 1102 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
cfeea807 1103 tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
0dc4af5c 1110 gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
cfeea807 1111 tcg_const_i32(1), tcg_const_i32(size - 1));
968a40f6 1112 }
083dbf48 1113
8cc9b43f
PC
1114 if (ex) {
1115 gen_set_label(swx_skip);
8cc9b43f 1116 }
968a40f6 1117
403322ea 1118 tcg_temp_free(addr);
4acb54ba
EI
1119}
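
/*
 * Added note: together with the lwx path in dec_load(), the swx handling
 * above provides load-linked/store-conditional semantics.  lwx records the
 * address and loaded value in env_res_addr/env_res_val; swx performs the
 * store and clears the carry (success) only if both still match, otherwise
 * the branch to swx_skip leaves the carry set to signal failure.
 */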
1120
1121static inline void eval_cc(DisasContext *dc, unsigned int cc,
cfeea807 1122 TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4acb54ba 1123{
4acb54ba
EI
1124 switch (cc) {
1125 case CC_EQ:
cfeea807 1126 tcg_gen_setcond_i32(TCG_COND_EQ, d, a, b);
4acb54ba
EI
1127 break;
1128 case CC_NE:
cfeea807 1129 tcg_gen_setcond_i32(TCG_COND_NE, d, a, b);
4acb54ba
EI
1130 break;
1131 case CC_LT:
cfeea807 1132 tcg_gen_setcond_i32(TCG_COND_LT, d, a, b);
4acb54ba
EI
1133 break;
1134 case CC_LE:
cfeea807 1135 tcg_gen_setcond_i32(TCG_COND_LE, d, a, b);
4acb54ba
EI
1136 break;
1137 case CC_GE:
cfeea807 1138 tcg_gen_setcond_i32(TCG_COND_GE, d, a, b);
4acb54ba
EI
1139 break;
1140 case CC_GT:
cfeea807 1141 tcg_gen_setcond_i32(TCG_COND_GT, d, a, b);
4acb54ba
EI
1142 break;
1143 default:
0063ebd6 1144 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
4acb54ba
EI
1145 break;
1146 }
1147}
1148
cfeea807 1149static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
4acb54ba 1150{
42a268c2 1151 TCGLabel *l1 = gen_new_label();
4acb54ba 1152 /* Conditional jmp. */
cfeea807
EI
1153 tcg_gen_mov_i32(cpu_SR[SR_PC], pc_false);
1154 tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
1155 tcg_gen_mov_i32(cpu_SR[SR_PC], pc_true);
4acb54ba
EI
1156 gen_set_label(l1);
1157}
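
/*
 * Added note: conditional branches are resolved at run time through
 * env_btaken/env_btarget.  eval_cc() sets env_btaken from the comparison of
 * rA against zero (e.g. beq/beqi take the branch when rA == 0), and once any
 * delay slot has been translated, eval_cond_jmp() selects either the branch
 * target or the fall-through PC.
 */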
1158
1159static void dec_bcc(DisasContext *dc)
1160{
1161 unsigned int cc;
1162 unsigned int dslot;
1163
1164 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1165 dslot = dc->ir & (1 << 25);
1166 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1167
1168 dc->delayed_branch = 1;
1169 if (dslot) {
1170 dc->delayed_branch = 2;
1171 dc->tb_flags |= D_FLAG;
cfeea807 1172 tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1173 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1174 }
1175
61204ce8
EI
1176 if (dec_alu_op_b_is_small_imm(dc)) {
1177 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1178
cfeea807 1179 tcg_gen_movi_i32(env_btarget, dc->pc + offset);
844bab60 1180 dc->jmp = JMP_DIRECT_CC;
23979dc5 1181 dc->jmp_pc = dc->pc + offset;
61204ce8 1182 } else {
23979dc5 1183 dc->jmp = JMP_INDIRECT;
cfeea807
EI
1184 tcg_gen_movi_i32(env_btarget, dc->pc);
1185 tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
61204ce8 1186 }
cfeea807 1187 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_i32(0));
4acb54ba
EI
1188}
1189
1190static void dec_br(DisasContext *dc)
1191{
9f6113c7 1192 unsigned int dslot, link, abs, mbar;
4acb54ba
EI
1193
1194 dslot = dc->ir & (1 << 20);
1195 abs = dc->ir & (1 << 19);
1196 link = dc->ir & (1 << 18);
9f6113c7
EI
1197
1198 /* Memory barrier. */
1199 mbar = (dc->ir >> 16) & 31;
1200 if (mbar == 2 && dc->imm == 4) {
5d45de97
EI
1201 /* mbar IMM & 16 decodes to sleep. */
1202 if (dc->rd & 16) {
1203 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1204 TCGv_i32 tmp_1 = tcg_const_i32(1);
1205
1206 LOG_DIS("sleep\n");
1207
1208 t_sync_flags(dc);
1209 tcg_gen_st_i32(tmp_1, cpu_env,
1210 -offsetof(MicroBlazeCPU, env)
1211 +offsetof(CPUState, halted));
cfeea807 1212 tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
5d45de97
EI
1213 gen_helper_raise_exception(cpu_env, tmp_hlt);
1214 tcg_temp_free_i32(tmp_hlt);
1215 tcg_temp_free_i32(tmp_1);
1216 return;
1217 }
9f6113c7
EI
1218 LOG_DIS("mbar %d\n", dc->rd);
1219 /* Break the TB. */
1220 dc->cpustate_changed = 1;
1221 return;
1222 }
1223
4acb54ba
EI
1224 LOG_DIS("br%s%s%s%s imm=%x\n",
1225 abs ? "a" : "", link ? "l" : "",
1226 dc->type_b ? "i" : "", dslot ? "d" : "",
1227 dc->imm);
1228
1229 dc->delayed_branch = 1;
1230 if (dslot) {
1231 dc->delayed_branch = 2;
1232 dc->tb_flags |= D_FLAG;
cfeea807 1233 tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1234 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1235 }
1236 if (link && dc->rd)
cfeea807 1237 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
4acb54ba
EI
1238
1239 dc->jmp = JMP_INDIRECT;
1240 if (abs) {
cfeea807
EI
1241 tcg_gen_movi_i32(env_btaken, 1);
1242 tcg_gen_mov_i32(env_btarget, *(dec_alu_op_b(dc)));
ff21f70a
EI
1243 if (link && !dslot) {
1244 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1245 t_gen_raise_exception(dc, EXCP_BREAK);
1246 if (dc->imm == 0) {
bdfc1e88 1247 if (trap_userspace(dc, true)) {
ff21f70a
EI
1248 return;
1249 }
1250
1251 t_gen_raise_exception(dc, EXCP_DEBUG);
1252 }
1253 }
4acb54ba 1254 } else {
61204ce8
EI
1255 if (dec_alu_op_b_is_small_imm(dc)) {
1256 dc->jmp = JMP_DIRECT;
1257 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1258 } else {
cfeea807
EI
1259 tcg_gen_movi_i32(env_btaken, 1);
1260 tcg_gen_movi_i32(env_btarget, dc->pc);
1261 tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
4acb54ba
EI
1262 }
1263 }
1264}
1265
1266static inline void do_rti(DisasContext *dc)
1267{
cfeea807
EI
1268 TCGv_i32 t0, t1;
1269 t0 = tcg_temp_new_i32();
1270 t1 = tcg_temp_new_i32();
1271 tcg_gen_shri_i32(t0, cpu_SR[SR_MSR], 1);
1272 tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_IE);
1273 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1274
1275 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1276 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1277 msr_write(dc, t1);
cfeea807
EI
1278 tcg_temp_free_i32(t1);
1279 tcg_temp_free_i32(t0);
4acb54ba
EI
1280 dc->tb_flags &= ~DRTI_FLAG;
1281}
1282
1283static inline void do_rtb(DisasContext *dc)
1284{
cfeea807
EI
1285 TCGv_i32 t0, t1;
1286 t0 = tcg_temp_new_i32();
1287 t1 = tcg_temp_new_i32();
1288 tcg_gen_andi_i32(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1289 tcg_gen_shri_i32(t0, t1, 1);
1290 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1291
1292 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1293 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1294 msr_write(dc, t1);
cfeea807
EI
1295 tcg_temp_free_i32(t1);
1296 tcg_temp_free_i32(t0);
4acb54ba
EI
1297 dc->tb_flags &= ~DRTB_FLAG;
1298}
1299
1300static inline void do_rte(DisasContext *dc)
1301{
cfeea807
EI
1302 TCGv_i32 t0, t1;
1303 t0 = tcg_temp_new_i32();
1304 t1 = tcg_temp_new_i32();
4acb54ba 1305
cfeea807
EI
1306 tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_EE);
1307 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1308 tcg_gen_shri_i32(t0, t1, 1);
1309 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
4acb54ba 1310
cfeea807
EI
1311 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1312 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1313 msr_write(dc, t1);
cfeea807
EI
1314 tcg_temp_free_i32(t1);
1315 tcg_temp_free_i32(t0);
4acb54ba
EI
1316 dc->tb_flags &= ~DRTE_FLAG;
1317}
1318
1319static void dec_rts(DisasContext *dc)
1320{
1321 unsigned int b_bit, i_bit, e_bit;
1322
1323 i_bit = dc->ir & (1 << 21);
1324 b_bit = dc->ir & (1 << 22);
1325 e_bit = dc->ir & (1 << 23);
1326
bdfc1e88
EI
1327 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1328 return;
1329 }
1330
4acb54ba
EI
1331 dc->delayed_branch = 2;
1332 dc->tb_flags |= D_FLAG;
cfeea807 1333 tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1334 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1335
1336 if (i_bit) {
1337 LOG_DIS("rtid ir=%x\n", dc->ir);
1338 dc->tb_flags |= DRTI_FLAG;
1339 } else if (b_bit) {
1340 LOG_DIS("rtbd ir=%x\n", dc->ir);
1341 dc->tb_flags |= DRTB_FLAG;
1342 } else if (e_bit) {
1343 LOG_DIS("rted ir=%x\n", dc->ir);
1344 dc->tb_flags |= DRTE_FLAG;
1345 } else
1346 LOG_DIS("rts ir=%x\n", dc->ir);
1347
23979dc5 1348 dc->jmp = JMP_INDIRECT;
cfeea807
EI
1349 tcg_gen_movi_i32(env_btaken, 1);
1350 tcg_gen_add_i32(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
1351}
1352
97694c57
EI
1353static int dec_check_fpuv2(DisasContext *dc)
1354{
be67e9ab 1355 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
cfeea807 1356 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_FPU);
97694c57
EI
1357 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1358 }
be67e9ab 1359 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
97694c57
EI
1360}
1361
1567a005
EI
1362static void dec_fpu(DisasContext *dc)
1363{
97694c57
EI
1364 unsigned int fpu_insn;
1365
9ba8cd45 1366 if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
1567a005
EI
1367 return;
1368 }
1369
97694c57
EI
1370 fpu_insn = (dc->ir >> 7) & 7;
1371
1372 switch (fpu_insn) {
1373 case 0:
64254eba
BS
1374 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1375 cpu_R[dc->rb]);
97694c57
EI
1376 break;
1377
1378 case 1:
64254eba
BS
1379 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1380 cpu_R[dc->rb]);
97694c57
EI
1381 break;
1382
1383 case 2:
64254eba
BS
1384 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1385 cpu_R[dc->rb]);
97694c57
EI
1386 break;
1387
1388 case 3:
64254eba
BS
1389 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1390 cpu_R[dc->rb]);
97694c57
EI
1391 break;
1392
1393 case 4:
1394 switch ((dc->ir >> 4) & 7) {
1395 case 0:
64254eba 1396 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
97694c57
EI
1397 cpu_R[dc->ra], cpu_R[dc->rb]);
1398 break;
1399 case 1:
64254eba 1400 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
97694c57
EI
1401 cpu_R[dc->ra], cpu_R[dc->rb]);
1402 break;
1403 case 2:
64254eba 1404 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
97694c57
EI
1405 cpu_R[dc->ra], cpu_R[dc->rb]);
1406 break;
1407 case 3:
64254eba 1408 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
97694c57
EI
1409 cpu_R[dc->ra], cpu_R[dc->rb]);
1410 break;
1411 case 4:
64254eba 1412 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
97694c57
EI
1413 cpu_R[dc->ra], cpu_R[dc->rb]);
1414 break;
1415 case 5:
64254eba 1416 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
97694c57
EI
1417 cpu_R[dc->ra], cpu_R[dc->rb]);
1418 break;
1419 case 6:
64254eba 1420 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
97694c57
EI
1421 cpu_R[dc->ra], cpu_R[dc->rb]);
1422 break;
1423 default:
71547a3b
BS
1424 qemu_log_mask(LOG_UNIMP,
1425 "unimplemented fcmp fpu_insn=%x pc=%x"
1426 " opc=%x\n",
1427 fpu_insn, dc->pc, dc->opcode);
97694c57
EI
1428 dc->abort_at_next_insn = 1;
1429 break;
1430 }
1431 break;
1432
1433 case 5:
1434 if (!dec_check_fpuv2(dc)) {
1435 return;
1436 }
64254eba 1437 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1438 break;
1439
1440 case 6:
1441 if (!dec_check_fpuv2(dc)) {
1442 return;
1443 }
64254eba 1444 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1445 break;
1446
1447 case 7:
1448 if (!dec_check_fpuv2(dc)) {
1449 return;
1450 }
64254eba 1451 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1452 break;
1453
1454 default:
71547a3b
BS
1455 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1456 " opc=%x\n",
1457 fpu_insn, dc->pc, dc->opcode);
97694c57
EI
1458 dc->abort_at_next_insn = 1;
1459 break;
1460 }
1567a005
EI
1461}
1462
4acb54ba
EI
1463static void dec_null(DisasContext *dc)
1464{
9ba8cd45 1465 if (trap_illegal(dc, true)) {
02b33596
EI
1466 return;
1467 }
1d512a65 1468 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
4acb54ba
EI
1469 dc->abort_at_next_insn = 1;
1470}
1471
6d76d23e
EI
1472/* Insns connected to FSL or AXI stream attached devices. */
1473static void dec_stream(DisasContext *dc)
1474{
6d76d23e
EI
1475 TCGv_i32 t_id, t_ctrl;
1476 int ctrl;
1477
1478 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1479 dc->type_b ? "" : "d", dc->imm);
1480
bdfc1e88 1481 if (trap_userspace(dc, true)) {
6d76d23e
EI
1482 return;
1483 }
1484
cfeea807 1485 t_id = tcg_temp_new_i32();
6d76d23e 1486 if (dc->type_b) {
cfeea807 1487 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
6d76d23e
EI
1488 ctrl = dc->imm >> 10;
1489 } else {
cfeea807 1490 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
6d76d23e
EI
1491 ctrl = dc->imm >> 5;
1492 }
1493
cfeea807 1494 t_ctrl = tcg_const_i32(ctrl);
6d76d23e
EI
1495
1496 if (dc->rd == 0) {
1497 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1498 } else {
1499 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1500 }
cfeea807
EI
1501 tcg_temp_free_i32(t_id);
1502 tcg_temp_free_i32(t_ctrl);
6d76d23e
EI
1503}
1504
4acb54ba
EI
1505static struct decoder_info {
1506 struct {
1507 uint32_t bits;
1508 uint32_t mask;
1509 };
1510 void (*dec)(DisasContext *dc);
1511} decinfo[] = {
1512 {DEC_ADD, dec_add},
1513 {DEC_SUB, dec_sub},
1514 {DEC_AND, dec_and},
1515 {DEC_XOR, dec_xor},
1516 {DEC_OR, dec_or},
1517 {DEC_BIT, dec_bit},
1518 {DEC_BARREL, dec_barrel},
1519 {DEC_LD, dec_load},
1520 {DEC_ST, dec_store},
1521 {DEC_IMM, dec_imm},
1522 {DEC_BR, dec_br},
1523 {DEC_BCC, dec_bcc},
1524 {DEC_RTS, dec_rts},
1567a005 1525 {DEC_FPU, dec_fpu},
4acb54ba
EI
1526 {DEC_MUL, dec_mul},
1527 {DEC_DIV, dec_div},
1528 {DEC_MSR, dec_msr},
6d76d23e 1529 {DEC_STREAM, dec_stream},
4acb54ba
EI
1530 {{0, 0}, dec_null}
1531};
1532
64254eba 1533static inline void decode(DisasContext *dc, uint32_t ir)
4acb54ba 1534{
4acb54ba
EI
1535 int i;
1536
64254eba 1537 dc->ir = ir;
4acb54ba
EI
1538 LOG_DIS("%8.8x\t", dc->ir);
1539
1540 if (dc->ir)
1541 dc->nr_nops = 0;
1542 else {
9ba8cd45 1543 trap_illegal(dc, dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK);
1567a005 1544
4acb54ba
EI
1545 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1546 dc->nr_nops++;
a47dddd7 1547 if (dc->nr_nops > 4) {
0063ebd6 1548 cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
a47dddd7 1549 }
4acb54ba
EI
1550 }
1551 /* bit 2 seems to indicate insn type. */
1552 dc->type_b = ir & (1 << 29);
1553
1554 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1555 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1556 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1557 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1558 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1559
1560 /* Large switch for all insns. */
1561 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1562 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1563 decinfo[i].dec(dc);
1564 break;
1565 }
1566 }
1567}
1568
4acb54ba 1569/* generate intermediate code for basic block 'tb'. */
9c489ea6 1570void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
4acb54ba 1571{
9c489ea6 1572 CPUMBState *env = cs->env_ptr;
4e5e1215 1573 MicroBlazeCPU *cpu = mb_env_get_cpu(env);
4acb54ba 1574 uint32_t pc_start;
4acb54ba
EI
1575 struct DisasContext ctx;
1576 struct DisasContext *dc = &ctx;
56371527 1577 uint32_t page_start, org_flags;
cfeea807 1578 uint32_t npc;
4acb54ba
EI
1579 int num_insns;
1580 int max_insns;
1581
4acb54ba 1582 pc_start = tb->pc;
0063ebd6 1583 dc->cpu = cpu;
4acb54ba
EI
1584 dc->tb = tb;
1585 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1586
4acb54ba
EI
1587 dc->is_jmp = DISAS_NEXT;
1588 dc->jmp = 0;
1589 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
23979dc5
EI
1590 if (dc->delayed_branch) {
1591 dc->jmp = JMP_INDIRECT;
1592 }
4acb54ba 1593 dc->pc = pc_start;
ed2803da 1594 dc->singlestep_enabled = cs->singlestep_enabled;
4acb54ba
EI
1595 dc->cpustate_changed = 0;
1596 dc->abort_at_next_insn = 0;
1597 dc->nr_nops = 0;
1598
a47dddd7
AF
1599 if (pc_start & 3) {
1600 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1601 }
4acb54ba 1602
56371527 1603 page_start = pc_start & TARGET_PAGE_MASK;
4acb54ba 1604 num_insns = 0;
c5a49c63 1605 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
190ce7fb 1606 if (max_insns == 0) {
4acb54ba 1607 max_insns = CF_COUNT_MASK;
190ce7fb
RH
1608 }
1609 if (max_insns > TCG_MAX_INSNS) {
1610 max_insns = TCG_MAX_INSNS;
1611 }
4acb54ba 1612
cd42d5b2 1613 gen_tb_start(tb);
4acb54ba
EI
1614 do
1615 {
667b8e29 1616 tcg_gen_insn_start(dc->pc);
959082fc 1617 num_insns++;
4acb54ba 1618
b933066a
RH
1619#if SIM_COMPAT
1620 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
cfeea807 1621 tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
b933066a
RH
1622 gen_helper_debug();
1623 }
1624#endif
1625
1626 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1627 t_gen_raise_exception(dc, EXCP_DEBUG);
1628 dc->is_jmp = DISAS_UPDATE;
522a0d4e
RH
1629 /* The address covered by the breakpoint must be included in
1630 [tb->pc, tb->pc + tb->size) in order to for it to be
1631 properly cleared -- thus we increment the PC here so that
1632 the logic setting tb->size below does the right thing. */
1633 dc->pc += 4;
b933066a
RH
1634 break;
1635 }
1636
4acb54ba
EI
1637 /* Pretty disas. */
1638 LOG_DIS("%8.8x:\t", dc->pc);
1639
c5a49c63 1640 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
4acb54ba 1641 gen_io_start();
959082fc 1642 }
4acb54ba
EI
1643
1644 dc->clear_imm = 1;
64254eba 1645 decode(dc, cpu_ldl_code(env, dc->pc));
4acb54ba
EI
1646 if (dc->clear_imm)
1647 dc->tb_flags &= ~IMM_FLAG;
4acb54ba 1648 dc->pc += 4;
4acb54ba
EI
1649
1650 if (dc->delayed_branch) {
1651 dc->delayed_branch--;
1652 if (!dc->delayed_branch) {
1653 if (dc->tb_flags & DRTI_FLAG)
1654 do_rti(dc);
1655 if (dc->tb_flags & DRTB_FLAG)
1656 do_rtb(dc);
1657 if (dc->tb_flags & DRTE_FLAG)
1658 do_rte(dc);
1659 /* Clear the delay slot flag. */
1660 dc->tb_flags &= ~D_FLAG;
1661 /* If it is a direct jump, try direct chaining. */
23979dc5 1662 if (dc->jmp == JMP_INDIRECT) {
cfeea807 1663 eval_cond_jmp(dc, env_btarget, tcg_const_i32(dc->pc));
4acb54ba 1664 dc->is_jmp = DISAS_JUMP;
23979dc5 1665 } else if (dc->jmp == JMP_DIRECT) {
844bab60
EI
1666 t_sync_flags(dc);
1667 gen_goto_tb(dc, 0, dc->jmp_pc);
1668 dc->is_jmp = DISAS_TB_JUMP;
1669 } else if (dc->jmp == JMP_DIRECT_CC) {
42a268c2 1670 TCGLabel *l1 = gen_new_label();
23979dc5 1671 t_sync_flags(dc);
23979dc5 1672 /* Conditional jmp. */
cfeea807 1673 tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
23979dc5
EI
1674 gen_goto_tb(dc, 1, dc->pc);
1675 gen_set_label(l1);
1676 gen_goto_tb(dc, 0, dc->jmp_pc);
1677
1678 dc->is_jmp = DISAS_TB_JUMP;
4acb54ba
EI
1679 }
1680 break;
1681 }
1682 }
ed2803da 1683 if (cs->singlestep_enabled) {
4acb54ba 1684 break;
ed2803da 1685 }
4acb54ba 1686 } while (!dc->is_jmp && !dc->cpustate_changed
fe700adb
RH
1687 && !tcg_op_buf_full()
1688 && !singlestep
56371527 1689 && (dc->pc - page_start < TARGET_PAGE_SIZE)
fe700adb 1690 && num_insns < max_insns);
4acb54ba
EI
1691
1692 npc = dc->pc;
844bab60 1693 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
4acb54ba
EI
1694 if (dc->tb_flags & D_FLAG) {
1695 dc->is_jmp = DISAS_UPDATE;
cfeea807 1696 tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
4acb54ba
EI
1697 sync_jmpstate(dc);
1698 } else
1699 npc = dc->jmp_pc;
1700 }
1701
c5a49c63 1702 if (tb_cflags(tb) & CF_LAST_IO)
4acb54ba
EI
1703 gen_io_end();
1704 /* Force an update if the per-tb cpu state has changed. */
1705 if (dc->is_jmp == DISAS_NEXT
1706 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1707 dc->is_jmp = DISAS_UPDATE;
cfeea807 1708 tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
4acb54ba
EI
1709 }
1710 t_sync_flags(dc);
1711
ed2803da 1712 if (unlikely(cs->singlestep_enabled)) {
6c5f738d
EI
1713 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1714
1715 if (dc->is_jmp != DISAS_JUMP) {
cfeea807 1716 tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
6c5f738d 1717 }
64254eba 1718 gen_helper_raise_exception(cpu_env, tmp);
6c5f738d 1719 tcg_temp_free_i32(tmp);
4acb54ba
EI
1720 } else {
1721 switch(dc->is_jmp) {
1722 case DISAS_NEXT:
1723 gen_goto_tb(dc, 1, npc);
1724 break;
1725 default:
1726 case DISAS_JUMP:
1727 case DISAS_UPDATE:
1728 /* indicate that the hash table must be used
1729 to find the next TB */
1730 tcg_gen_exit_tb(0);
1731 break;
1732 case DISAS_TB_JUMP:
1733 /* nothing more to generate */
1734 break;
1735 }
1736 }
806f352d 1737 gen_tb_end(tb, num_insns);
0a7df5da 1738
4e5e1215
RH
1739 tb->size = dc->pc - pc_start;
1740 tb->icount = num_insns;
4acb54ba
EI
1741
1742#ifdef DEBUG_DISAS
1743#if !SIM_COMPAT
4910e6e4
RH
1744 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1745 && qemu_log_in_addr_range(pc_start)) {
1ee73216 1746 qemu_log_lock();
f01a5e7e 1747 qemu_log("--------------\n");
1d48474d 1748 log_target_disas(cs, pc_start, dc->pc - pc_start);
1ee73216 1749 qemu_log_unlock();
4acb54ba
EI
1750 }
1751#endif
1752#endif
1753 assert(!dc->abort_at_next_insn);
1754}
1755
878096ee
AF
1756void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1757 int flags)
4acb54ba 1758{
878096ee
AF
1759 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1760 CPUMBState *env = &cpu->env;
4acb54ba
EI
1761 int i;
1762
1763 if (!env || !f)
1764 return;
1765
1766 cpu_fprintf(f, "IN: PC=%x %s\n",
1767 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
97694c57 1768 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
4c24aa0a 1769 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
97694c57 1770 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
17c52a43 1771 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
4acb54ba
EI
1772 env->btaken, env->btarget,
1773 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
17c52a43
EI
1774 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1775 (env->sregs[SR_MSR] & MSR_EIP),
1776 (env->sregs[SR_MSR] & MSR_IE));
1777
4acb54ba
EI
1778 for (i = 0; i < 32; i++) {
1779 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1780 if ((i + 1) % 4 == 0)
1781 cpu_fprintf(f, "\n");
1782 }
1783 cpu_fprintf(f, "\n\n");
1784}
1785
cd0c24f9
AF
1786void mb_tcg_init(void)
1787{
1788 int i;
4acb54ba 1789
cfeea807 1790 env_debug = tcg_global_mem_new_i32(cpu_env,
68cee38a 1791 offsetof(CPUMBState, debug),
4acb54ba 1792 "debug0");
cfeea807 1793 env_iflags = tcg_global_mem_new_i32(cpu_env,
68cee38a 1794 offsetof(CPUMBState, iflags),
4acb54ba 1795 "iflags");
cfeea807 1796 env_imm = tcg_global_mem_new_i32(cpu_env,
68cee38a 1797 offsetof(CPUMBState, imm),
4acb54ba 1798 "imm");
cfeea807 1799 env_btarget = tcg_global_mem_new_i32(cpu_env,
68cee38a 1800 offsetof(CPUMBState, btarget),
4acb54ba 1801 "btarget");
cfeea807 1802 env_btaken = tcg_global_mem_new_i32(cpu_env,
68cee38a 1803 offsetof(CPUMBState, btaken),
4acb54ba 1804 "btaken");
403322ea 1805 env_res_addr = tcg_global_mem_new(cpu_env,
4a536270
EI
1806 offsetof(CPUMBState, res_addr),
1807 "res_addr");
cfeea807 1808 env_res_val = tcg_global_mem_new_i32(cpu_env,
11a76217
EI
1809 offsetof(CPUMBState, res_val),
1810 "res_val");
4acb54ba 1811 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
cfeea807 1812 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
68cee38a 1813 offsetof(CPUMBState, regs[i]),
4acb54ba
EI
1814 regnames[i]);
1815 }
1816 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
cfeea807 1817 cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
68cee38a 1818 offsetof(CPUMBState, sregs[i]),
4acb54ba
EI
1819 special_regnames[i]);
1820 }
4acb54ba
EI
1821}
1822
bad729e2
RH
1823void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1824 target_ulong *data)
4acb54ba 1825{
bad729e2 1826 env->sregs[SR_PC] = data[0];
4acb54ba 1827}