git blame view: mirror_qemu.git / target-microblaze / translate.c
(blamed at commit "target-*: Unconditionally emit tcg_gen_insn_start")
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
4acb54ba 21#include "cpu.h"
76cad711 22#include "disas/disas.h"
4acb54ba 23#include "tcg-op.h"
2ef6175a 24#include "exec/helper-proto.h"
4acb54ba 25#include "microblaze-decode.h"
f08b6170 26#include "exec/cpu_ldst.h"
2ef6175a 27#include "exec/helper-gen.h"
4acb54ba 28
a7e30d84
LV
29#include "trace-tcg.h"
30
31
4acb54ba
EI
32#define SIM_COMPAT 0
33#define DISAS_GNU 1
34#define DISAS_MB 1
35#if DISAS_MB && !SIM_COMPAT
36# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
37#else
38# define LOG_DIS(...) do { } while (0)
39#endif
40
41#define D(x)
42
43#define EXTRACT_FIELD(src, start, end) \
44 (((src) >> start) & ((1 << (end - start + 1)) - 1))
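/*
 * Usage sketch (field positions taken from decode() below, LSB-0 bit
 * numbering): EXTRACT_FIELD(src, start, end) returns bits [end:start]
 * inclusive, shifted down to bit 0.
 *
 *   dc->opcode = EXTRACT_FIELD(ir, 26, 31);    6-bit major opcode
 *   dc->rd     = EXTRACT_FIELD(ir, 21, 25);    destination register
 *   dc->imm    = EXTRACT_FIELD(ir,  0, 15);    16-bit immediate
 *
 * e.g. assuming the standard encoding of "addi r5, r0, 5", ir = 0x20a00005
 * decodes to opcode 0x08, rd 5, ra 0, imm 5.
 */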
45
46static TCGv env_debug;
47static TCGv_ptr cpu_env;
48static TCGv cpu_R[32];
49static TCGv cpu_SR[18];
50static TCGv env_imm;
51static TCGv env_btaken;
52static TCGv env_btarget;
53static TCGv env_iflags;
4a536270 54static TCGv env_res_addr;
11a76217 55static TCGv env_res_val;
4acb54ba 56
022c62cb 57#include "exec/gen-icount.h"
4acb54ba
EI
58
59/* This is the state at translation time. */
60typedef struct DisasContext {
0063ebd6 61 MicroBlazeCPU *cpu;
a5efa644 62 target_ulong pc;
4acb54ba
EI
63
64 /* Decoder. */
65 int type_b;
66 uint32_t ir;
67 uint8_t opcode;
68 uint8_t rd, ra, rb;
69 uint16_t imm;
70
71 unsigned int cpustate_changed;
72 unsigned int delayed_branch;
73 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
74 unsigned int clear_imm;
75 int is_jmp;
76
844bab60
EI
77#define JMP_NOJMP 0
78#define JMP_DIRECT 1
79#define JMP_DIRECT_CC 2
80#define JMP_INDIRECT 3
4acb54ba
EI
81 unsigned int jmp;
82 uint32_t jmp_pc;
83
84 int abort_at_next_insn;
85 int nr_nops;
86 struct TranslationBlock *tb;
87 int singlestep_enabled;
88} DisasContext;
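/*
 * Note on the flag fields above: tb_flags is the translator's working copy
 * of the per-TB iflags (IMM_FLAG, D_FLAG, DRT*_FLAG, ...), while
 * synced_flags remembers the value last written back to env_iflags.
 * t_sync_flags() below only emits a store when the two differ, so flag
 * churn inside a TB does not generate redundant tcg moves.
 */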
89
38972938 90static const char *regnames[] =
4acb54ba
EI
91{
92 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
93 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
94 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
95 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
96};
97
38972938 98static const char *special_regnames[] =
4acb54ba
EI
99{
100 "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
101 "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
102 "sr16", "sr17", "sr18"
103};
104
4acb54ba
EI
105static inline void t_sync_flags(DisasContext *dc)
106{
4abf79a4 107    /* Sync the tb dependent flags between translator and runtime.  */
4acb54ba
EI
108 if (dc->tb_flags != dc->synced_flags) {
109 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
110 dc->synced_flags = dc->tb_flags;
111 }
112}
113
114static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
115{
116 TCGv_i32 tmp = tcg_const_i32(index);
117
118 t_sync_flags(dc);
119 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
64254eba 120 gen_helper_raise_exception(cpu_env, tmp);
4acb54ba
EI
121 tcg_temp_free_i32(tmp);
122 dc->is_jmp = DISAS_UPDATE;
123}
124
125static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
126{
127 TranslationBlock *tb;
128 tb = dc->tb;
129 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
130 tcg_gen_goto_tb(n);
131 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
8cfd0495 132 tcg_gen_exit_tb((uintptr_t)tb + n);
4acb54ba
EI
133 } else {
134 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
135 tcg_gen_exit_tb(0);
136 }
137}
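/*
 * Sketch of the rationale (generic QEMU TCG behaviour, not specific to this
 * file): tcg_gen_goto_tb()/tcg_gen_exit_tb((uintptr_t)tb + n) lets the
 * execution loop patch jump slot n of this TB so the two blocks are chained
 * directly.  Chaining is only safe when the destination lies on the same
 * guest page as the current TB, because TB invalidation is tracked per page;
 * otherwise we fall back to exit_tb(0) and a full TB lookup.
 */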
138
ee8b246f
EI
139static void read_carry(DisasContext *dc, TCGv d)
140{
141 tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
142}
143
04ec7df7
EI
144/*
145 * write_carry sets the carry bits in MSR based on bit 0 of v.
146 * v[31:1] are ignored.
147 */
ee8b246f
EI
148static void write_carry(DisasContext *dc, TCGv v)
149{
150 TCGv t0 = tcg_temp_new();
151 tcg_gen_shli_tl(t0, v, 31);
152 tcg_gen_sari_tl(t0, t0, 31);
153 tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
154 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
155 ~(MSR_C | MSR_CC));
156 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
157 tcg_temp_free(t0);
158}
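/*
 * The carry is mirrored in two MSR bits: the architectural carry (MSR_C)
 * and a copy (MSR_CC) kept in the MSR's top bit so that read_carry() can
 * extract it with a single 31-bit shift.  write_carry() broadcasts bit 0 of
 * v to both positions via the shift-left/arithmetic-shift-right trick above.
 */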
159
65ab5eb4 160static void write_carryi(DisasContext *dc, bool carry)
8cc9b43f
PC
161{
162 TCGv t0 = tcg_temp_new();
65ab5eb4 163 tcg_gen_movi_tl(t0, carry);
8cc9b43f
PC
164 write_carry(dc, t0);
165 tcg_temp_free(t0);
166}
167
61204ce8
EI
168/* True if ALU operand b is a small immediate that may deserve
169 faster treatment. */
170static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
171{
172 /* Immediate insn without the imm prefix ? */
173 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
174}
175
4acb54ba
EI
176static inline TCGv *dec_alu_op_b(DisasContext *dc)
177{
178 if (dc->type_b) {
179 if (dc->tb_flags & IMM_FLAG)
180 tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
181 else
182 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
183 return &env_imm;
184 } else
185 return &cpu_R[dc->rb];
186}
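/*
 * Worked example of the immediate handling above (a sketch following the
 * MicroBlaze imm-prefix rules): a bare type-B insn sign-extends its 16-bit
 * immediate, so "addi r3, r4, 0x8000" uses 0xffff8000.  When preceded by an
 * imm prefix, dec_imm() has already parked the upper half in env_imm and set
 * IMM_FLAG, and the OR above merges the two halves:
 *
 *   imm  0x1234              env_imm = 0x12340000, IMM_FLAG set
 *   addi r3, r4, 0x5678      operand b = 0x12345678
 */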
187
188static void dec_add(DisasContext *dc)
189{
190 unsigned int k, c;
40cbf5b7 191 TCGv cf;
4acb54ba
EI
192
193 k = dc->opcode & 4;
194 c = dc->opcode & 2;
195
196 LOG_DIS("add%s%s%s r%d r%d r%d\n",
197 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
198 dc->rd, dc->ra, dc->rb);
199
40cbf5b7
EI
200 /* Take care of the easy cases first. */
201 if (k) {
202 /* k - keep carry, no need to update MSR. */
203 /* If rd == r0, it's a nop. */
204 if (dc->rd) {
205 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
206
207 if (c) {
208 /* c - Add carry into the result. */
209 cf = tcg_temp_new();
210
211 read_carry(dc, cf);
212 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
213 tcg_temp_free(cf);
214 }
215 }
216 return;
217 }
218
219 /* From now on, we can assume k is zero. So we need to update MSR. */
220 /* Extract carry. */
221 cf = tcg_temp_new();
222 if (c) {
223 read_carry(dc, cf);
224 } else {
225 tcg_gen_movi_tl(cf, 0);
226 }
227
228 if (dc->rd) {
229 TCGv ncf = tcg_temp_new();
5d0bb823 230 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
4acb54ba 231 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
40cbf5b7
EI
232 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
233 write_carry(dc, ncf);
234 tcg_temp_free(ncf);
235 } else {
5d0bb823 236 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
40cbf5b7 237 write_carry(dc, cf);
4acb54ba 238 }
40cbf5b7 239 tcg_temp_free(cf);
4acb54ba
EI
240}
241
242static void dec_sub(DisasContext *dc)
243{
244 unsigned int u, cmp, k, c;
e0a42ebc 245 TCGv cf, na;
4acb54ba
EI
246
247 u = dc->imm & 2;
248 k = dc->opcode & 4;
249 c = dc->opcode & 2;
250 cmp = (dc->imm & 1) && (!dc->type_b) && k;
251
252 if (cmp) {
253 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
254 if (dc->rd) {
255 if (u)
256 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
257 else
258 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
259 }
e0a42ebc
EI
260 return;
261 }
262
263 LOG_DIS("sub%s%s r%d, r%d r%d\n",
264 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
265
266 /* Take care of the easy cases first. */
267 if (k) {
268 /* k - keep carry, no need to update MSR. */
269 /* If rd == r0, it's a nop. */
270 if (dc->rd) {
4acb54ba 271 tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
e0a42ebc
EI
272
273 if (c) {
274 /* c - Add carry into the result. */
275 cf = tcg_temp_new();
276
277 read_carry(dc, cf);
278 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
279 tcg_temp_free(cf);
280 }
281 }
282 return;
283 }
284
285 /* From now on, we can assume k is zero. So we need to update MSR. */
286 /* Extract carry. And complement a into na. */
287 cf = tcg_temp_new();
288 na = tcg_temp_new();
289 if (c) {
290 read_carry(dc, cf);
291 } else {
292 tcg_gen_movi_tl(cf, 1);
293 }
294
295 /* d = b + ~a + c. carry defaults to 1. */
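    /*
     * Quick check of the identity (32-bit arithmetic): b - a == b + ~a + 1.
     * For b = 5, a = 3: 5 + 0xfffffffc + 1 = 0x1_00000002, i.e. result 2
     * with a carry out of 1 -- and MicroBlaze treats carry as "no borrow",
     * so MSR[C] = 1 is the expected flag for 5 - 3.
     */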
296 tcg_gen_not_tl(na, cpu_R[dc->ra]);
297
298 if (dc->rd) {
299 TCGv ncf = tcg_temp_new();
5d0bb823 300 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
e0a42ebc
EI
301 tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
302 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
303 write_carry(dc, ncf);
304 tcg_temp_free(ncf);
305 } else {
5d0bb823 306 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
e0a42ebc 307 write_carry(dc, cf);
4acb54ba 308 }
e0a42ebc
EI
309 tcg_temp_free(cf);
310 tcg_temp_free(na);
4acb54ba
EI
311}
312
313static void dec_pattern(DisasContext *dc)
314{
315 unsigned int mode;
4acb54ba 316
1567a005 317 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6
AF
318 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
319 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
1567a005
EI
320 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
321 t_gen_raise_exception(dc, EXCP_HW_EXCP);
322 }
323
4acb54ba
EI
324 mode = dc->opcode & 3;
325 switch (mode) {
326 case 0:
327 /* pcmpbf. */
328 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
329 if (dc->rd)
330 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
331 break;
332 case 2:
333 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
334 if (dc->rd) {
86112805
RH
335 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
336 cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
337 }
338 break;
339 case 3:
340 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
4acb54ba 341 if (dc->rd) {
86112805
RH
342 tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
343 cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
344 }
345 break;
346 default:
0063ebd6 347 cpu_abort(CPU(dc->cpu),
4acb54ba
EI
348 "unsupported pattern insn opcode=%x\n", dc->opcode);
349 break;
350 }
351}
352
353static void dec_and(DisasContext *dc)
354{
355 unsigned int not;
356
357 if (!dc->type_b && (dc->imm & (1 << 10))) {
358 dec_pattern(dc);
359 return;
360 }
361
362 not = dc->opcode & (1 << 1);
363 LOG_DIS("and%s\n", not ? "n" : "");
364
365 if (!dc->rd)
366 return;
367
368 if (not) {
a235900e 369 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
370 } else
371 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
372}
373
374static void dec_or(DisasContext *dc)
375{
376 if (!dc->type_b && (dc->imm & (1 << 10))) {
377 dec_pattern(dc);
378 return;
379 }
380
381 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
382 if (dc->rd)
383 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
384}
385
386static void dec_xor(DisasContext *dc)
387{
388 if (!dc->type_b && (dc->imm & (1 << 10))) {
389 dec_pattern(dc);
390 return;
391 }
392
393 LOG_DIS("xor r%d\n", dc->rd);
394 if (dc->rd)
395 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
396}
397
4acb54ba
EI
398static inline void msr_read(DisasContext *dc, TCGv d)
399{
400 tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
401}
402
403static inline void msr_write(DisasContext *dc, TCGv v)
404{
97b833c5
EI
405 TCGv t;
406
407 t = tcg_temp_new();
4acb54ba 408 dc->cpustate_changed = 1;
97b833c5 409 /* PVR bit is not writable. */
8a84fc6b
EI
410 tcg_gen_andi_tl(t, v, ~MSR_PVR);
411 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
97b833c5
EI
412    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
413 tcg_temp_free(t);
4acb54ba
EI
414}
415
416static void dec_msr(DisasContext *dc)
417{
0063ebd6 418 CPUState *cs = CPU(dc->cpu);
4acb54ba
EI
419 TCGv t0, t1;
420 unsigned int sr, to, rn;
97ed5ccd 421 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
4acb54ba
EI
422
423 sr = dc->imm & ((1 << 14) - 1);
424 to = dc->imm & (1 << 14);
425 dc->type_b = 1;
426 if (to)
427 dc->cpustate_changed = 1;
428
429 /* msrclr and msrset. */
430 if (!(dc->imm & (1 << 15))) {
431 unsigned int clr = dc->ir & (1 << 16);
432
433 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
434 dc->rd, dc->imm);
1567a005 435
0063ebd6 436 if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
1567a005
EI
437 /* nop??? */
438 return;
439 }
440
441 if ((dc->tb_flags & MSR_EE_FLAG)
442 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
443 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
444 t_gen_raise_exception(dc, EXCP_HW_EXCP);
445 return;
446 }
447
4acb54ba
EI
448 if (dc->rd)
449 msr_read(dc, cpu_R[dc->rd]);
450
451 t0 = tcg_temp_new();
452 t1 = tcg_temp_new();
453 msr_read(dc, t0);
454 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
455
456 if (clr) {
457 tcg_gen_not_tl(t1, t1);
458 tcg_gen_and_tl(t0, t0, t1);
459 } else
460 tcg_gen_or_tl(t0, t0, t1);
461 msr_write(dc, t0);
462 tcg_temp_free(t0);
463 tcg_temp_free(t1);
464 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
465 dc->is_jmp = DISAS_UPDATE;
466 return;
467 }
468
1567a005
EI
469 if (to) {
470 if ((dc->tb_flags & MSR_EE_FLAG)
471 && mem_index == MMU_USER_IDX) {
472 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
473 t_gen_raise_exception(dc, EXCP_HW_EXCP);
474 return;
475 }
476 }
477
4acb54ba
EI
478#if !defined(CONFIG_USER_ONLY)
479 /* Catch read/writes to the mmu block. */
480 if ((sr & ~0xff) == 0x1000) {
481 sr &= 7;
482 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
483 if (to)
64254eba 484 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
4acb54ba 485 else
64254eba 486 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
4acb54ba
EI
487 return;
488 }
489#endif
490
491 if (to) {
492 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
493 switch (sr) {
494 case 0:
495 break;
496 case 1:
497 msr_write(dc, cpu_R[dc->ra]);
498 break;
499 case 0x3:
500 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
501 break;
502 case 0x5:
503 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
504 break;
505 case 0x7:
97694c57 506 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
4acb54ba 507 break;
5818dee5 508 case 0x800:
68cee38a 509 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
510 break;
511 case 0x802:
68cee38a 512 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
5818dee5 513 break;
4acb54ba 514 default:
0063ebd6 515 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
4acb54ba
EI
516 break;
517 }
518 } else {
519 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
520
521 switch (sr) {
522 case 0:
523 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
524 break;
525 case 1:
526 msr_read(dc, cpu_R[dc->rd]);
527 break;
528 case 0x3:
529 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
530 break;
531 case 0x5:
532 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
533 break;
534 case 0x7:
97694c57 535 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
4acb54ba
EI
536 break;
537 case 0xb:
538 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
539 break;
5818dee5 540 case 0x800:
68cee38a 541 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
542 break;
543 case 0x802:
68cee38a 544 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
5818dee5 545 break;
4acb54ba
EI
546 case 0x2000:
547 case 0x2001:
548 case 0x2002:
549 case 0x2003:
550 case 0x2004:
551 case 0x2005:
552 case 0x2006:
553 case 0x2007:
554 case 0x2008:
555 case 0x2009:
556 case 0x200a:
557 case 0x200b:
558 case 0x200c:
559 rn = sr & 0xf;
560 tcg_gen_ld_tl(cpu_R[dc->rd],
68cee38a 561 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
4acb54ba
EI
562 break;
563 default:
a47dddd7 564 cpu_abort(cs, "unknown mfs reg %x\n", sr);
4acb54ba
EI
565 break;
566 }
567 }
ee7dbcf8
EI
568
569 if (dc->rd == 0) {
570 tcg_gen_movi_tl(cpu_R[0], 0);
571 }
4acb54ba
EI
572}
573
574/* 64-bit signed mul, lower result in d and upper in d2. */
575static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
576{
577 TCGv_i64 t0, t1;
578
579 t0 = tcg_temp_new_i64();
580 t1 = tcg_temp_new_i64();
581
582 tcg_gen_ext_i32_i64(t0, a);
583 tcg_gen_ext_i32_i64(t1, b);
584 tcg_gen_mul_i64(t0, t0, t1);
585
ecc7b3aa 586 tcg_gen_extrl_i64_i32(d, t0);
4acb54ba 587 tcg_gen_shri_i64(t0, t0, 32);
ecc7b3aa 588 tcg_gen_extrl_i64_i32(d2, t0);
4acb54ba
EI
589
590 tcg_temp_free_i64(t0);
591 tcg_temp_free_i64(t1);
592}
593
594/* 64-bit unsigned muls, lower result in d and upper in d2. */
595static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
596{
597 TCGv_i64 t0, t1;
598
599 t0 = tcg_temp_new_i64();
600 t1 = tcg_temp_new_i64();
601
602 tcg_gen_extu_i32_i64(t0, a);
603 tcg_gen_extu_i32_i64(t1, b);
604 tcg_gen_mul_i64(t0, t0, t1);
605
ecc7b3aa 606 tcg_gen_extrl_i64_i32(d, t0);
4acb54ba 607 tcg_gen_shri_i64(t0, t0, 32);
ecc7b3aa 608 tcg_gen_extrl_i64_i32(d2, t0);
4acb54ba
EI
609
610 tcg_temp_free_i64(t0);
611 tcg_temp_free_i64(t1);
612}
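/*
 * Example of why the signed and unsigned variants differ only in the upper
 * word: for a = b = 0xffffffff, t_gen_mulu() yields high:low =
 * 0xfffffffe:00000001 (unsigned 2^64 - 2^33 + 1), while t_gen_muls() treats
 * both operands as -1 and yields 0x00000000:00000001.  dec_mul() below picks
 * the matching helper for mulhu/mulh and discards the half it does not need.
 */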
613
614/* Multiplier unit. */
615static void dec_mul(DisasContext *dc)
616{
617 TCGv d[2];
618 unsigned int subcode;
619
1567a005 620 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6
AF
621 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
622 && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
1567a005
EI
623 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
624 t_gen_raise_exception(dc, EXCP_HW_EXCP);
625 return;
626 }
627
4acb54ba
EI
628 subcode = dc->imm & 3;
629 d[0] = tcg_temp_new();
630 d[1] = tcg_temp_new();
631
632 if (dc->type_b) {
633 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
634 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
635 goto done;
636 }
637
1567a005
EI
638 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
639 if (subcode >= 1 && subcode <= 3
0063ebd6 640 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
1567a005
EI
641 /* nop??? */
642 }
643
4acb54ba
EI
644 switch (subcode) {
645 case 0:
646 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
647 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
648 break;
649 case 1:
650 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
651 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
652 break;
653 case 2:
654 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
655 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
656 break;
657 case 3:
658 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
659 t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
660 break;
661 default:
0063ebd6 662 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
4acb54ba
EI
663 break;
664 }
665done:
666 tcg_temp_free(d[0]);
667 tcg_temp_free(d[1]);
668}
669
670/* Div unit. */
671static void dec_div(DisasContext *dc)
672{
673 unsigned int u;
674
675 u = dc->imm & 2;
676 LOG_DIS("div\n");
677
0063ebd6
AF
678 if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
679 && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
1567a005
EI
680 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
681 t_gen_raise_exception(dc, EXCP_HW_EXCP);
682 }
683
4acb54ba 684 if (u)
64254eba
BS
685 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
686 cpu_R[dc->ra]);
4acb54ba 687 else
64254eba
BS
688 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
689 cpu_R[dc->ra]);
4acb54ba
EI
690 if (!dc->rd)
691 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
692}
693
694static void dec_barrel(DisasContext *dc)
695{
696 TCGv t0;
697 unsigned int s, t;
698
1567a005 699 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6
AF
700 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
701 && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
1567a005
EI
702 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
703 t_gen_raise_exception(dc, EXCP_HW_EXCP);
704 return;
705 }
706
4acb54ba
EI
707 s = dc->imm & (1 << 10);
708 t = dc->imm & (1 << 9);
709
710 LOG_DIS("bs%s%s r%d r%d r%d\n",
711 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
712
713 t0 = tcg_temp_new();
714
715 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
716 tcg_gen_andi_tl(t0, t0, 31);
717
718 if (s)
719 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
720 else {
721 if (t)
722 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
723 else
724 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
725 }
726}
727
728static void dec_bit(DisasContext *dc)
729{
0063ebd6 730 CPUState *cs = CPU(dc->cpu);
09b9f113 731 TCGv t0;
4acb54ba 732 unsigned int op;
97ed5ccd 733 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
4acb54ba 734
ace2e4da 735 op = dc->ir & ((1 << 9) - 1);
4acb54ba
EI
736 switch (op) {
737 case 0x21:
738 /* src. */
739 t0 = tcg_temp_new();
740
741 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
09b9f113
EI
742 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
743 write_carry(dc, cpu_R[dc->ra]);
4acb54ba 744 if (dc->rd) {
4acb54ba 745 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
09b9f113 746 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
4acb54ba 747 }
4acb54ba
EI
748 tcg_temp_free(t0);
749 break;
750
751 case 0x1:
752 case 0x41:
753 /* srl. */
4acb54ba
EI
754 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
755
bb3cb951
EI
756 /* Update carry. Note that write carry only looks at the LSB. */
757 write_carry(dc, cpu_R[dc->ra]);
4acb54ba
EI
758 if (dc->rd) {
759 if (op == 0x41)
760 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
761 else
762 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
763 }
764 break;
765 case 0x60:
766 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
767 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
768 break;
769 case 0x61:
770 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
771 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
772 break;
773 case 0x64:
f062a3c7
EI
774 case 0x66:
775 case 0x74:
776 case 0x76:
4acb54ba
EI
777 /* wdc. */
778 LOG_DIS("wdc r%d\n", dc->ra);
1567a005
EI
779 if ((dc->tb_flags & MSR_EE_FLAG)
780 && mem_index == MMU_USER_IDX) {
781 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
782 t_gen_raise_exception(dc, EXCP_HW_EXCP);
783 return;
784 }
4acb54ba
EI
785 break;
786 case 0x68:
787 /* wic. */
788 LOG_DIS("wic r%d\n", dc->ra);
1567a005
EI
789 if ((dc->tb_flags & MSR_EE_FLAG)
790 && mem_index == MMU_USER_IDX) {
791 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
792 t_gen_raise_exception(dc, EXCP_HW_EXCP);
793 return;
794 }
4acb54ba 795 break;
48b5e96f
EI
796 case 0xe0:
797 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6
AF
798 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
799 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
48b5e96f
EI
800 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
801 t_gen_raise_exception(dc, EXCP_HW_EXCP);
802 }
0063ebd6 803 if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
48b5e96f
EI
804 gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
805 }
806 break;
ace2e4da
PC
807 case 0x1e0:
808 /* swapb */
809 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
810 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
811 break;
b8c6a5d9 812 case 0x1e2:
ace2e4da
PC
813 /*swaph */
814 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
815 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
816 break;
4acb54ba 817 default:
a47dddd7
AF
818 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
819 dc->pc, op, dc->rd, dc->ra, dc->rb);
4acb54ba
EI
820 break;
821 }
822}
823
824static inline void sync_jmpstate(DisasContext *dc)
825{
844bab60
EI
826 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
827 if (dc->jmp == JMP_DIRECT) {
828 tcg_gen_movi_tl(env_btaken, 1);
829 }
23979dc5
EI
830 dc->jmp = JMP_INDIRECT;
831 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
4acb54ba
EI
832 }
833}
834
835static void dec_imm(DisasContext *dc)
836{
837 LOG_DIS("imm %x\n", dc->imm << 16);
838 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
839 dc->tb_flags |= IMM_FLAG;
840 dc->clear_imm = 0;
841}
842
4acb54ba
EI
843static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
844{
845 unsigned int extimm = dc->tb_flags & IMM_FLAG;
5818dee5
EI
846    /* Should be set to one if r1 is used by load/stores.  */
847 int stackprot = 0;
848
849 /* All load/stores use ra. */
9aaaa181 850 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
5818dee5
EI
851 stackprot = 1;
852 }
4acb54ba 853
9ef55357 854 /* Treat the common cases first. */
4acb54ba 855 if (!dc->type_b) {
4b5ef0b5
EI
856 /* If any of the regs is r0, return a ptr to the other. */
857 if (dc->ra == 0) {
858 return &cpu_R[dc->rb];
859 } else if (dc->rb == 0) {
860 return &cpu_R[dc->ra];
861 }
862
9aaaa181 863 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
5818dee5
EI
864 stackprot = 1;
865 }
866
4acb54ba
EI
867 *t = tcg_temp_new();
868 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
5818dee5
EI
869
870 if (stackprot) {
64254eba 871 gen_helper_stackprot(cpu_env, *t);
5818dee5 872 }
4acb54ba
EI
873 return t;
874 }
875 /* Immediate. */
876 if (!extimm) {
877 if (dc->imm == 0) {
878 return &cpu_R[dc->ra];
879 }
880 *t = tcg_temp_new();
881 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
882 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
883 } else {
884 *t = tcg_temp_new();
885 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
886 }
887
5818dee5 888 if (stackprot) {
64254eba 889 gen_helper_stackprot(cpu_env, *t);
5818dee5 890 }
4acb54ba
EI
891 return t;
892}
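/*
 * Note on gen_helper_stackprot() above (behavioural sketch, see the helper
 * in op_helper.c for the authoritative check): when stack protection is
 * configured and the access is r1-relative, the helper compares the
 * effective address against the stack low/high limit registers (the slr/shr
 * fields written via mts in dec_msr()) and raises a hardware exception if
 * the address falls outside that window.
 */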
893
894static void dec_load(DisasContext *dc)
895{
47acdd63 896 TCGv t, v, *addr;
8cc9b43f 897 unsigned int size, rev = 0, ex = 0;
47acdd63 898 TCGMemOp mop;
4acb54ba 899
47acdd63
RH
900 mop = dc->opcode & 3;
901 size = 1 << mop;
9f8beb66
EI
902 if (!dc->type_b) {
903 rev = (dc->ir >> 9) & 1;
8cc9b43f 904 ex = (dc->ir >> 10) & 1;
9f8beb66 905 }
47acdd63
RH
906 mop |= MO_TE;
907 if (rev) {
908 mop ^= MO_BSWAP;
909 }
9f8beb66 910
0187688f 911 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
0063ebd6 912 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
0187688f
EI
913 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
914 t_gen_raise_exception(dc, EXCP_HW_EXCP);
915 return;
916 }
4acb54ba 917
8cc9b43f
PC
918 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
919 ex ? "x" : "");
9f8beb66 920
4acb54ba
EI
921 t_sync_flags(dc);
922 addr = compute_ldst_addr(dc, &t);
923
9f8beb66
EI
924 /*
925 * When doing reverse accesses we need to do two things.
926 *
4ff9786c 927 * 1. Reverse the address wrt endianness.
9f8beb66
EI
928 * 2. Byteswap the data lanes on the way back into the CPU core.
929 */
930 if (rev && size != 4) {
931 /* Endian reverse the address. t is addr. */
932 switch (size) {
933 case 1:
934 {
935 /* 00 -> 11
936 01 -> 10
937               10 -> 01
938 11 -> 00 */
939 TCGv low = tcg_temp_new();
940
941 /* Force addr into the temp. */
942 if (addr != &t) {
943 t = tcg_temp_new();
944 tcg_gen_mov_tl(t, *addr);
945 addr = &t;
946 }
947
948 tcg_gen_andi_tl(low, t, 3);
949 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
950 tcg_gen_andi_tl(t, t, ~3);
951 tcg_gen_or_tl(t, t, low);
9f8beb66
EI
952 tcg_gen_mov_tl(env_imm, t);
953 tcg_temp_free(low);
954 break;
955 }
956
957 case 2:
958 /* 00 -> 10
959 10 -> 00. */
960 /* Force addr into the temp. */
961 if (addr != &t) {
962 t = tcg_temp_new();
963 tcg_gen_xori_tl(t, *addr, 2);
964 addr = &t;
965 } else {
966 tcg_gen_xori_tl(t, t, 2);
967 }
968 break;
969 default:
0063ebd6 970 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
9f8beb66
EI
971 break;
972 }
973 }
974
8cc9b43f
PC
975 /* lwx does not throw unaligned access errors, so force alignment */
976 if (ex) {
977 /* Force addr into the temp. */
978 if (addr != &t) {
979 t = tcg_temp_new();
980 tcg_gen_mov_tl(t, *addr);
981 addr = &t;
982 }
983 tcg_gen_andi_tl(t, t, ~3);
984 }
985
4acb54ba
EI
986 /* If we get a fault on a dslot, the jmpstate better be in sync. */
987 sync_jmpstate(dc);
968a40f6
EI
988
989 /* Verify alignment if needed. */
47acdd63
RH
990 /*
991 * Microblaze gives MMU faults priority over faults due to
992 * unaligned addresses. That's why we speculatively do the load
993 * into v. If the load succeeds, we verify alignment of the
994 * address and if that succeeds we write into the destination reg.
995 */
996 v = tcg_temp_new();
97ed5ccd 997 tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
a12f6507 998
0063ebd6 999 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
a12f6507 1000 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
64254eba 1001 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
3aa80988 1002 tcg_const_tl(0), tcg_const_tl(size - 1));
4acb54ba
EI
1003 }
1004
47acdd63
RH
1005 if (ex) {
1006 tcg_gen_mov_tl(env_res_addr, *addr);
1007 tcg_gen_mov_tl(env_res_val, v);
1008 }
1009 if (dc->rd) {
1010 tcg_gen_mov_tl(cpu_R[dc->rd], v);
1011 }
1012 tcg_temp_free(v);
1013
8cc9b43f 1014 if (ex) { /* lwx */
b6af0975 1015 /* no support for AXI exclusive so always clear C */
8cc9b43f 1016 write_carryi(dc, 0);
8cc9b43f
PC
1017 }
1018
4acb54ba
EI
1019 if (addr == &t)
1020 tcg_temp_free(t);
1021}
1022
4acb54ba
EI
1023static void dec_store(DisasContext *dc)
1024{
4a536270 1025 TCGv t, *addr, swx_addr;
42a268c2 1026 TCGLabel *swx_skip = NULL;
8cc9b43f 1027 unsigned int size, rev = 0, ex = 0;
47acdd63 1028 TCGMemOp mop;
4acb54ba 1029
47acdd63
RH
1030 mop = dc->opcode & 3;
1031 size = 1 << mop;
9f8beb66
EI
1032 if (!dc->type_b) {
1033 rev = (dc->ir >> 9) & 1;
8cc9b43f 1034 ex = (dc->ir >> 10) & 1;
9f8beb66 1035 }
47acdd63
RH
1036 mop |= MO_TE;
1037 if (rev) {
1038 mop ^= MO_BSWAP;
1039 }
4acb54ba 1040
0187688f 1041 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
0063ebd6 1042 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
0187688f
EI
1043 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1044 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1045 return;
1046 }
1047
8cc9b43f
PC
1048 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1049 ex ? "x" : "");
4acb54ba
EI
1050 t_sync_flags(dc);
1051 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1052 sync_jmpstate(dc);
1053 addr = compute_ldst_addr(dc, &t);
968a40f6 1054
083dbf48 1055 swx_addr = tcg_temp_local_new();
8cc9b43f 1056 if (ex) { /* swx */
11a76217 1057 TCGv tval;
8cc9b43f
PC
1058
1059 /* Force addr into the swx_addr. */
1060 tcg_gen_mov_tl(swx_addr, *addr);
1061 addr = &swx_addr;
1062 /* swx does not throw unaligned access errors, so force alignment */
1063 tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
1064
8cc9b43f
PC
1065 write_carryi(dc, 1);
1066 swx_skip = gen_new_label();
4a536270 1067 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
11a76217
EI
1068
1069 /* Compare the value loaded at lwx with current contents of
1070 the reserved location.
1071 FIXME: This only works for system emulation where we can expect
1072 this compare and the following write to be atomic. For user
1073 emulation we need to add atomicity between threads. */
1074 tval = tcg_temp_new();
97ed5ccd 1075 tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
0063ebd6 1076 MO_TEUL);
11a76217 1077 tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
8cc9b43f 1078 write_carryi(dc, 0);
11a76217 1079 tcg_temp_free(tval);
8cc9b43f
PC
1080 }
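    /*
     * Guest-side usage sketch (assumed typical lwx/swx sequence, not taken
     * from this file): software retries while the conditional store fails,
     * i.e. while MSR[C] is still set after swx:
     *
     *   retry:  lwx   r5, r6, r0     ; load word, set reservation
     *           addik r5, r5, 1
     *           swx   r5, r6, r0     ; clears MSR[C] only on success
     *           addc  r7, r0, r0     ; r7 = MSR[C]
     *           bnei  r7, retry
     *
     * The write_carryi(1)/write_carryi(0) calls above implement exactly that
     * success/failure signalling.
     */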
1081
9f8beb66
EI
1082 if (rev && size != 4) {
1083 /* Endian reverse the address. t is addr. */
1084 switch (size) {
1085 case 1:
1086 {
1087 /* 00 -> 11
1088 01 -> 10
1089               10 -> 01
1090 11 -> 00 */
1091 TCGv low = tcg_temp_new();
1092
1093 /* Force addr into the temp. */
1094 if (addr != &t) {
1095 t = tcg_temp_new();
1096 tcg_gen_mov_tl(t, *addr);
1097 addr = &t;
1098 }
1099
1100 tcg_gen_andi_tl(low, t, 3);
1101 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1102 tcg_gen_andi_tl(t, t, ~3);
1103 tcg_gen_or_tl(t, t, low);
9f8beb66
EI
1104 tcg_gen_mov_tl(env_imm, t);
1105 tcg_temp_free(low);
1106 break;
1107 }
1108
1109 case 2:
1110 /* 00 -> 10
1111 10 -> 00. */
1112 /* Force addr into the temp. */
1113 if (addr != &t) {
1114 t = tcg_temp_new();
1115 tcg_gen_xori_tl(t, *addr, 2);
1116 addr = &t;
1117 } else {
1118 tcg_gen_xori_tl(t, t, 2);
1119 }
1120 break;
1121 default:
0063ebd6 1122 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
9f8beb66
EI
1123 break;
1124 }
9f8beb66 1125 }
97ed5ccd 1126 tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
a12f6507 1127
968a40f6 1128 /* Verify alignment if needed. */
0063ebd6 1129 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
a12f6507
EI
1130 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1131 /* FIXME: if the alignment is wrong, we should restore the value
4abf79a4 1132 * in memory. One possible way to achieve this is to probe
9f8beb66
EI
1133          * the MMU prior to the memaccess, that way we could put
1134 * the alignment checks in between the probe and the mem
1135 * access.
a12f6507 1136 */
64254eba 1137 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
3aa80988 1138 tcg_const_tl(1), tcg_const_tl(size - 1));
968a40f6 1139 }
083dbf48 1140
8cc9b43f
PC
1141 if (ex) {
1142 gen_set_label(swx_skip);
8cc9b43f 1143 }
083dbf48 1144 tcg_temp_free(swx_addr);
968a40f6 1145
4acb54ba
EI
1146 if (addr == &t)
1147 tcg_temp_free(t);
1148}
1149
1150static inline void eval_cc(DisasContext *dc, unsigned int cc,
1151 TCGv d, TCGv a, TCGv b)
1152{
4acb54ba
EI
1153 switch (cc) {
1154 case CC_EQ:
b2565c69 1155 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
4acb54ba
EI
1156 break;
1157 case CC_NE:
b2565c69 1158 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
4acb54ba
EI
1159 break;
1160 case CC_LT:
b2565c69 1161 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
4acb54ba
EI
1162 break;
1163 case CC_LE:
b2565c69 1164 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
4acb54ba
EI
1165 break;
1166 case CC_GE:
b2565c69 1167 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
4acb54ba
EI
1168 break;
1169 case CC_GT:
b2565c69 1170 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
4acb54ba
EI
1171 break;
1172 default:
0063ebd6 1173 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
4acb54ba
EI
1174 break;
1175 }
1176}
1177
1178static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1179{
42a268c2 1180 TCGLabel *l1 = gen_new_label();
4acb54ba
EI
1181 /* Conditional jmp. */
1182 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1183 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1184 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1185 gen_set_label(l1);
1186}
1187
1188static void dec_bcc(DisasContext *dc)
1189{
1190 unsigned int cc;
1191 unsigned int dslot;
1192
1193 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1194 dslot = dc->ir & (1 << 25);
1195 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1196
1197 dc->delayed_branch = 1;
1198 if (dslot) {
1199 dc->delayed_branch = 2;
1200 dc->tb_flags |= D_FLAG;
1201 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1202 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1203 }
1204
61204ce8
EI
1205 if (dec_alu_op_b_is_small_imm(dc)) {
1206 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1207
1208 tcg_gen_movi_tl(env_btarget, dc->pc + offset);
844bab60 1209 dc->jmp = JMP_DIRECT_CC;
23979dc5 1210 dc->jmp_pc = dc->pc + offset;
61204ce8 1211 } else {
23979dc5 1212 dc->jmp = JMP_INDIRECT;
61204ce8
EI
1213 tcg_gen_movi_tl(env_btarget, dc->pc);
1214 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1215 }
61204ce8 1216 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
4acb54ba
EI
1217}
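/*
 * Delay-slot bookkeeping used by dec_bcc()/dec_br()/dec_rts(): setting
 * delayed_branch = 2 makes the main translation loop execute one more guest
 * insn (the delay slot) before the branch is resolved; D_FLAG marks that we
 * are translating inside a delay slot so exceptions can restart correctly,
 * and the bimm store records whether that slot itself carried an imm prefix.
 */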
1218
1219static void dec_br(DisasContext *dc)
1220{
9f6113c7 1221 unsigned int dslot, link, abs, mbar;
97ed5ccd 1222 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
4acb54ba
EI
1223
1224 dslot = dc->ir & (1 << 20);
1225 abs = dc->ir & (1 << 19);
1226 link = dc->ir & (1 << 18);
9f6113c7
EI
1227
1228 /* Memory barrier. */
1229 mbar = (dc->ir >> 16) & 31;
1230 if (mbar == 2 && dc->imm == 4) {
5d45de97
EI
1231 /* mbar IMM & 16 decodes to sleep. */
1232 if (dc->rd & 16) {
1233 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1234 TCGv_i32 tmp_1 = tcg_const_i32(1);
1235
1236 LOG_DIS("sleep\n");
1237
1238 t_sync_flags(dc);
1239 tcg_gen_st_i32(tmp_1, cpu_env,
1240 -offsetof(MicroBlazeCPU, env)
1241 +offsetof(CPUState, halted));
1242 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
1243 gen_helper_raise_exception(cpu_env, tmp_hlt);
1244 tcg_temp_free_i32(tmp_hlt);
1245 tcg_temp_free_i32(tmp_1);
1246 return;
1247 }
9f6113c7
EI
1248 LOG_DIS("mbar %d\n", dc->rd);
1249 /* Break the TB. */
1250 dc->cpustate_changed = 1;
1251 return;
1252 }
1253
4acb54ba
EI
1254 LOG_DIS("br%s%s%s%s imm=%x\n",
1255 abs ? "a" : "", link ? "l" : "",
1256 dc->type_b ? "i" : "", dslot ? "d" : "",
1257 dc->imm);
1258
1259 dc->delayed_branch = 1;
1260 if (dslot) {
1261 dc->delayed_branch = 2;
1262 dc->tb_flags |= D_FLAG;
1263 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1264 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1265 }
1266 if (link && dc->rd)
1267 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
1268
1269 dc->jmp = JMP_INDIRECT;
1270 if (abs) {
1271 tcg_gen_movi_tl(env_btaken, 1);
1272 tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
ff21f70a
EI
1273 if (link && !dslot) {
1274 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1275 t_gen_raise_exception(dc, EXCP_BREAK);
1276 if (dc->imm == 0) {
1277 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
1278 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1279 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1280 return;
1281 }
1282
1283 t_gen_raise_exception(dc, EXCP_DEBUG);
1284 }
1285 }
4acb54ba 1286 } else {
61204ce8
EI
1287 if (dec_alu_op_b_is_small_imm(dc)) {
1288 dc->jmp = JMP_DIRECT;
1289 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1290 } else {
4acb54ba
EI
1291 tcg_gen_movi_tl(env_btaken, 1);
1292 tcg_gen_movi_tl(env_btarget, dc->pc);
1293 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
4acb54ba
EI
1294 }
1295 }
1296}
1297
1298static inline void do_rti(DisasContext *dc)
1299{
1300 TCGv t0, t1;
1301 t0 = tcg_temp_new();
1302 t1 = tcg_temp_new();
1303 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1304 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1305 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1306
1307 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1308 tcg_gen_or_tl(t1, t1, t0);
1309 msr_write(dc, t1);
1310 tcg_temp_free(t1);
1311 tcg_temp_free(t0);
1312 dc->tb_flags &= ~DRTI_FLAG;
1313}
1314
1315static inline void do_rtb(DisasContext *dc)
1316{
1317 TCGv t0, t1;
1318 t0 = tcg_temp_new();
1319 t1 = tcg_temp_new();
1320 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1321 tcg_gen_shri_tl(t0, t1, 1);
1322 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1323
1324 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1325 tcg_gen_or_tl(t1, t1, t0);
1326 msr_write(dc, t1);
1327 tcg_temp_free(t1);
1328 tcg_temp_free(t0);
1329 dc->tb_flags &= ~DRTB_FLAG;
1330}
1331
1332static inline void do_rte(DisasContext *dc)
1333{
1334 TCGv t0, t1;
1335 t0 = tcg_temp_new();
1336 t1 = tcg_temp_new();
1337
1338 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1339 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1340 tcg_gen_shri_tl(t0, t1, 1);
1341 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1342
1343 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1344 tcg_gen_or_tl(t1, t1, t0);
1345 msr_write(dc, t1);
1346 tcg_temp_free(t1);
1347 tcg_temp_free(t0);
1348 dc->tb_flags &= ~DRTE_FLAG;
1349}
1350
1351static void dec_rts(DisasContext *dc)
1352{
1353 unsigned int b_bit, i_bit, e_bit;
97ed5ccd 1354 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
4acb54ba
EI
1355
1356 i_bit = dc->ir & (1 << 21);
1357 b_bit = dc->ir & (1 << 22);
1358 e_bit = dc->ir & (1 << 23);
1359
1360 dc->delayed_branch = 2;
1361 dc->tb_flags |= D_FLAG;
1362 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1363 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1364
1365 if (i_bit) {
1366 LOG_DIS("rtid ir=%x\n", dc->ir);
1567a005
EI
1367 if ((dc->tb_flags & MSR_EE_FLAG)
1368 && mem_index == MMU_USER_IDX) {
1369 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1370 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1371 }
4acb54ba
EI
1372 dc->tb_flags |= DRTI_FLAG;
1373 } else if (b_bit) {
1374 LOG_DIS("rtbd ir=%x\n", dc->ir);
1567a005
EI
1375 if ((dc->tb_flags & MSR_EE_FLAG)
1376 && mem_index == MMU_USER_IDX) {
1377 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1378 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1379 }
4acb54ba
EI
1380 dc->tb_flags |= DRTB_FLAG;
1381 } else if (e_bit) {
1382 LOG_DIS("rted ir=%x\n", dc->ir);
1567a005
EI
1383 if ((dc->tb_flags & MSR_EE_FLAG)
1384 && mem_index == MMU_USER_IDX) {
1385 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1386 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1387 }
4acb54ba
EI
1388 dc->tb_flags |= DRTE_FLAG;
1389 } else
1390 LOG_DIS("rts ir=%x\n", dc->ir);
1391
23979dc5 1392 dc->jmp = JMP_INDIRECT;
4acb54ba
EI
1393 tcg_gen_movi_tl(env_btaken, 1);
1394 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
1395}
1396
97694c57
EI
1397static int dec_check_fpuv2(DisasContext *dc)
1398{
be67e9ab 1399 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
97694c57
EI
1400 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1401 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1402 }
be67e9ab 1403 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
97694c57
EI
1404}
1405
1567a005
EI
1406static void dec_fpu(DisasContext *dc)
1407{
97694c57
EI
1408 unsigned int fpu_insn;
1409
1567a005 1410 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 1411 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
be67e9ab 1412 && (dc->cpu->cfg.use_fpu != 1)) {
97694c57 1413 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1567a005
EI
1414 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1415 return;
1416 }
1417
97694c57
EI
1418 fpu_insn = (dc->ir >> 7) & 7;
1419
1420 switch (fpu_insn) {
1421 case 0:
64254eba
BS
1422 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1423 cpu_R[dc->rb]);
97694c57
EI
1424 break;
1425
1426 case 1:
64254eba
BS
1427 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1428 cpu_R[dc->rb]);
97694c57
EI
1429 break;
1430
1431 case 2:
64254eba
BS
1432 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1433 cpu_R[dc->rb]);
97694c57
EI
1434 break;
1435
1436 case 3:
64254eba
BS
1437 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1438 cpu_R[dc->rb]);
97694c57
EI
1439 break;
1440
1441 case 4:
1442 switch ((dc->ir >> 4) & 7) {
1443 case 0:
64254eba 1444 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
97694c57
EI
1445 cpu_R[dc->ra], cpu_R[dc->rb]);
1446 break;
1447 case 1:
64254eba 1448 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
97694c57
EI
1449 cpu_R[dc->ra], cpu_R[dc->rb]);
1450 break;
1451 case 2:
64254eba 1452 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
97694c57
EI
1453 cpu_R[dc->ra], cpu_R[dc->rb]);
1454 break;
1455 case 3:
64254eba 1456 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
97694c57
EI
1457 cpu_R[dc->ra], cpu_R[dc->rb]);
1458 break;
1459 case 4:
64254eba 1460 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
97694c57
EI
1461 cpu_R[dc->ra], cpu_R[dc->rb]);
1462 break;
1463 case 5:
64254eba 1464 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
97694c57
EI
1465 cpu_R[dc->ra], cpu_R[dc->rb]);
1466 break;
1467 case 6:
64254eba 1468 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
97694c57
EI
1469 cpu_R[dc->ra], cpu_R[dc->rb]);
1470 break;
1471 default:
71547a3b
BS
1472 qemu_log_mask(LOG_UNIMP,
1473 "unimplemented fcmp fpu_insn=%x pc=%x"
1474 " opc=%x\n",
1475 fpu_insn, dc->pc, dc->opcode);
97694c57
EI
1476 dc->abort_at_next_insn = 1;
1477 break;
1478 }
1479 break;
1480
1481 case 5:
1482 if (!dec_check_fpuv2(dc)) {
1483 return;
1484 }
64254eba 1485 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1486 break;
1487
1488 case 6:
1489 if (!dec_check_fpuv2(dc)) {
1490 return;
1491 }
64254eba 1492 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1493 break;
1494
1495 case 7:
1496 if (!dec_check_fpuv2(dc)) {
1497 return;
1498 }
64254eba 1499 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1500 break;
1501
1502 default:
71547a3b
BS
1503 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1504 " opc=%x\n",
1505 fpu_insn, dc->pc, dc->opcode);
97694c57
EI
1506 dc->abort_at_next_insn = 1;
1507 break;
1508 }
1567a005
EI
1509}
1510
4acb54ba
EI
1511static void dec_null(DisasContext *dc)
1512{
02b33596 1513 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 1514 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
02b33596
EI
1515 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1516 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1517 return;
1518 }
4acb54ba
EI
1519 qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1520 dc->abort_at_next_insn = 1;
1521}
1522
6d76d23e
EI
1523/* Insns connected to FSL or AXI stream attached devices. */
1524static void dec_stream(DisasContext *dc)
1525{
97ed5ccd 1526 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
6d76d23e
EI
1527 TCGv_i32 t_id, t_ctrl;
1528 int ctrl;
1529
1530 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1531 dc->type_b ? "" : "d", dc->imm);
1532
1533 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1534 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1535 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1536 return;
1537 }
1538
1539 t_id = tcg_temp_new();
1540 if (dc->type_b) {
1541 tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1542 ctrl = dc->imm >> 10;
1543 } else {
1544 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1545 ctrl = dc->imm >> 5;
1546 }
1547
1548 t_ctrl = tcg_const_tl(ctrl);
1549
1550 if (dc->rd == 0) {
1551 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1552 } else {
1553 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1554 }
1555 tcg_temp_free(t_id);
1556 tcg_temp_free(t_ctrl);
1557}
1558
4acb54ba
EI
1559static struct decoder_info {
1560 struct {
1561 uint32_t bits;
1562 uint32_t mask;
1563 };
1564 void (*dec)(DisasContext *dc);
1565} decinfo[] = {
1566 {DEC_ADD, dec_add},
1567 {DEC_SUB, dec_sub},
1568 {DEC_AND, dec_and},
1569 {DEC_XOR, dec_xor},
1570 {DEC_OR, dec_or},
1571 {DEC_BIT, dec_bit},
1572 {DEC_BARREL, dec_barrel},
1573 {DEC_LD, dec_load},
1574 {DEC_ST, dec_store},
1575 {DEC_IMM, dec_imm},
1576 {DEC_BR, dec_br},
1577 {DEC_BCC, dec_bcc},
1578 {DEC_RTS, dec_rts},
1567a005 1579 {DEC_FPU, dec_fpu},
4acb54ba
EI
1580 {DEC_MUL, dec_mul},
1581 {DEC_DIV, dec_div},
1582 {DEC_MSR, dec_msr},
6d76d23e 1583 {DEC_STREAM, dec_stream},
4acb54ba
EI
1584 {{0, 0}, dec_null}
1585};
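/*
 * Matching sketch: decode() walks this table and dispatches to the first
 * entry whose pattern matches, i.e. (dc->opcode & decinfo[i].mask) ==
 * decinfo[i].bits.  The DEC_* macros from microblaze-decode.h expand to such
 * {bits, mask} pairs, and the final {{0, 0}, dec_null} entry matches any
 * opcode, so unknown instructions always fall through to dec_null().
 */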
1586
64254eba 1587static inline void decode(DisasContext *dc, uint32_t ir)
4acb54ba 1588{
4acb54ba
EI
1589 int i;
1590
64254eba 1591 dc->ir = ir;
4acb54ba
EI
1592 LOG_DIS("%8.8x\t", dc->ir);
1593
1594 if (dc->ir)
1595 dc->nr_nops = 0;
1596 else {
1567a005 1597 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6
AF
1598 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1599 && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1567a005
EI
1600 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1601 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1602 return;
1603 }
1604
4acb54ba
EI
1605 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1606 dc->nr_nops++;
a47dddd7 1607 if (dc->nr_nops > 4) {
0063ebd6 1608 cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
a47dddd7 1609 }
4acb54ba
EI
1610 }
1611 /* bit 2 seems to indicate insn type. */
1612 dc->type_b = ir & (1 << 29);
1613
1614 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1615 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1616 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1617 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1618 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1619
1620 /* Large switch for all insns. */
1621 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1622 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1623 decinfo[i].dec(dc);
1624 break;
1625 }
1626 }
1627}
1628
68cee38a 1629static void check_breakpoint(CPUMBState *env, DisasContext *dc)
4acb54ba 1630{
f0c3c505 1631 CPUState *cs = CPU(mb_env_get_cpu(env));
4acb54ba
EI
1632 CPUBreakpoint *bp;
1633
f0c3c505
AF
1634 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
1635 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
4acb54ba
EI
1636 if (bp->pc == dc->pc) {
1637 t_gen_raise_exception(dc, EXCP_DEBUG);
1638 dc->is_jmp = DISAS_UPDATE;
1639 }
1640 }
1641 }
1642}
1643
1644/* generate intermediate code for basic block 'tb'. */
fd327f48 1645static inline void
4a274212
AF
1646gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
1647 bool search_pc)
4acb54ba 1648{
ed2803da 1649 CPUState *cs = CPU(cpu);
4a274212 1650 CPUMBState *env = &cpu->env;
4acb54ba
EI
1651 uint32_t pc_start;
1652 int j, lj;
1653 struct DisasContext ctx;
1654 struct DisasContext *dc = &ctx;
1655 uint32_t next_page_start, org_flags;
1656 target_ulong npc;
1657 int num_insns;
1658 int max_insns;
1659
4acb54ba 1660 pc_start = tb->pc;
0063ebd6 1661 dc->cpu = cpu;
4acb54ba
EI
1662 dc->tb = tb;
1663 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1664
4acb54ba
EI
1665 dc->is_jmp = DISAS_NEXT;
1666 dc->jmp = 0;
1667 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
23979dc5
EI
1668 if (dc->delayed_branch) {
1669 dc->jmp = JMP_INDIRECT;
1670 }
4acb54ba 1671 dc->pc = pc_start;
ed2803da 1672 dc->singlestep_enabled = cs->singlestep_enabled;
4acb54ba
EI
1673 dc->cpustate_changed = 0;
1674 dc->abort_at_next_insn = 0;
1675 dc->nr_nops = 0;
1676
a47dddd7
AF
1677 if (pc_start & 3) {
1678 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1679 }
4acb54ba
EI
1680
1681 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1682#if !SIM_COMPAT
1683 qemu_log("--------------\n");
a0762859 1684 log_cpu_state(CPU(cpu), 0);
4acb54ba
EI
1685#endif
1686 }
1687
1688 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1689 lj = -1;
1690 num_insns = 0;
1691 max_insns = tb->cflags & CF_COUNT_MASK;
1692 if (max_insns == 0)
1693 max_insns = CF_COUNT_MASK;
1694
cd42d5b2 1695 gen_tb_start(tb);
4acb54ba
EI
1696 do
1697 {
1698#if SIM_COMPAT
1699 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1700 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1701 gen_helper_debug();
1702 }
1703#endif
1704 check_breakpoint(env, dc);
1705
1706 if (search_pc) {
fe700adb 1707 j = tcg_op_buf_count();
4acb54ba
EI
1708 if (lj < j) {
1709 lj++;
1710 while (lj < j)
ab1103de 1711 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4acb54ba 1712 }
25983cad 1713 tcg_ctx.gen_opc_pc[lj] = dc->pc;
ab1103de 1714 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 1715 tcg_ctx.gen_opc_icount[lj] = num_insns;
4acb54ba 1716 }
667b8e29 1717 tcg_gen_insn_start(dc->pc);
4acb54ba
EI
1718
1719 /* Pretty disas. */
1720 LOG_DIS("%8.8x:\t", dc->pc);
1721
1722 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1723 gen_io_start();
1724
1725 dc->clear_imm = 1;
64254eba 1726 decode(dc, cpu_ldl_code(env, dc->pc));
4acb54ba
EI
1727 if (dc->clear_imm)
1728 dc->tb_flags &= ~IMM_FLAG;
4acb54ba
EI
1729 dc->pc += 4;
1730 num_insns++;
1731
1732 if (dc->delayed_branch) {
1733 dc->delayed_branch--;
1734 if (!dc->delayed_branch) {
1735 if (dc->tb_flags & DRTI_FLAG)
1736 do_rti(dc);
1737 if (dc->tb_flags & DRTB_FLAG)
1738 do_rtb(dc);
1739 if (dc->tb_flags & DRTE_FLAG)
1740 do_rte(dc);
1741 /* Clear the delay slot flag. */
1742 dc->tb_flags &= ~D_FLAG;
1743 /* If it is a direct jump, try direct chaining. */
23979dc5 1744 if (dc->jmp == JMP_INDIRECT) {
4acb54ba
EI
1745 eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1746 dc->is_jmp = DISAS_JUMP;
23979dc5 1747 } else if (dc->jmp == JMP_DIRECT) {
844bab60
EI
1748 t_sync_flags(dc);
1749 gen_goto_tb(dc, 0, dc->jmp_pc);
1750 dc->is_jmp = DISAS_TB_JUMP;
1751 } else if (dc->jmp == JMP_DIRECT_CC) {
42a268c2 1752 TCGLabel *l1 = gen_new_label();
23979dc5 1753 t_sync_flags(dc);
23979dc5
EI
1754 /* Conditional jmp. */
1755 tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1756 gen_goto_tb(dc, 1, dc->pc);
1757 gen_set_label(l1);
1758 gen_goto_tb(dc, 0, dc->jmp_pc);
1759
1760 dc->is_jmp = DISAS_TB_JUMP;
4acb54ba
EI
1761 }
1762 break;
1763 }
1764 }
ed2803da 1765 if (cs->singlestep_enabled) {
4acb54ba 1766 break;
ed2803da 1767 }
4acb54ba 1768 } while (!dc->is_jmp && !dc->cpustate_changed
fe700adb
RH
1769 && !tcg_op_buf_full()
1770 && !singlestep
1771 && (dc->pc < next_page_start)
1772 && num_insns < max_insns);
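    /*
     * The loop above stops translating when the insn stream forces it
     * (dc->is_jmp set or cpustate_changed, e.g. on MSR writes), when the
     * TCG opcode buffer is full, when single-stepping, at a guest page
     * boundary (so TB invalidation stays per-page), or after max_insns
     * instructions for icount.
     */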
4acb54ba
EI
1773
1774 npc = dc->pc;
844bab60 1775 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
4acb54ba
EI
1776 if (dc->tb_flags & D_FLAG) {
1777 dc->is_jmp = DISAS_UPDATE;
1778 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1779 sync_jmpstate(dc);
1780 } else
1781 npc = dc->jmp_pc;
1782 }
1783
1784 if (tb->cflags & CF_LAST_IO)
1785 gen_io_end();
1786 /* Force an update if the per-tb cpu state has changed. */
1787 if (dc->is_jmp == DISAS_NEXT
1788 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1789 dc->is_jmp = DISAS_UPDATE;
1790 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1791 }
1792 t_sync_flags(dc);
1793
ed2803da 1794 if (unlikely(cs->singlestep_enabled)) {
6c5f738d
EI
1795 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1796
1797 if (dc->is_jmp != DISAS_JUMP) {
4acb54ba 1798 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
6c5f738d 1799 }
64254eba 1800 gen_helper_raise_exception(cpu_env, tmp);
6c5f738d 1801 tcg_temp_free_i32(tmp);
4acb54ba
EI
1802 } else {
1803 switch(dc->is_jmp) {
1804 case DISAS_NEXT:
1805 gen_goto_tb(dc, 1, npc);
1806 break;
1807 default:
1808 case DISAS_JUMP:
1809 case DISAS_UPDATE:
1810 /* indicate that the hash table must be used
1811 to find the next TB */
1812 tcg_gen_exit_tb(0);
1813 break;
1814 case DISAS_TB_JUMP:
1815 /* nothing more to generate */
1816 break;
1817 }
1818 }
806f352d 1819 gen_tb_end(tb, num_insns);
0a7df5da 1820
4acb54ba 1821 if (search_pc) {
fe700adb 1822 j = tcg_op_buf_count();
4acb54ba
EI
1823 lj++;
1824 while (lj <= j)
ab1103de 1825 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4acb54ba
EI
1826 } else {
1827 tb->size = dc->pc - pc_start;
1828 tb->icount = num_insns;
1829 }
1830
1831#ifdef DEBUG_DISAS
1832#if !SIM_COMPAT
1833 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1834 qemu_log("\n");
1835#if DISAS_GNU
d49190c4 1836 log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
4acb54ba 1837#endif
fe700adb
RH
1838 qemu_log("\nisize=%d osize=%d\n",
1839 dc->pc - pc_start, tcg_op_buf_count());
4acb54ba
EI
1840 }
1841#endif
1842#endif
1843 assert(!dc->abort_at_next_insn);
1844}
1845
68cee38a 1846void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
4acb54ba 1847{
4a274212 1848 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
4acb54ba
EI
1849}
1850
68cee38a 1851void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
4acb54ba 1852{
4a274212 1853 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
4acb54ba
EI
1854}
1855
878096ee
AF
1856void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1857 int flags)
4acb54ba 1858{
878096ee
AF
1859 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1860 CPUMBState *env = &cpu->env;
4acb54ba
EI
1861 int i;
1862
1863 if (!env || !f)
1864 return;
1865
1866 cpu_fprintf(f, "IN: PC=%x %s\n",
1867 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
97694c57 1868 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
4c24aa0a 1869 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
97694c57 1870 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
17c52a43 1871 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
4acb54ba
EI
1872 env->btaken, env->btarget,
1873 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
17c52a43
EI
1874 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1875 (env->sregs[SR_MSR] & MSR_EIP),
1876 (env->sregs[SR_MSR] & MSR_IE));
1877
4acb54ba
EI
1878 for (i = 0; i < 32; i++) {
1879 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1880 if ((i + 1) % 4 == 0)
1881 cpu_fprintf(f, "\n");
1882 }
1883 cpu_fprintf(f, "\n\n");
1884}
1885
b33ab1f7 1886MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
4acb54ba 1887{
b77f98ca 1888 MicroBlazeCPU *cpu;
4acb54ba 1889
b77f98ca 1890 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
4acb54ba 1891
746b03b2 1892 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
4acb54ba 1893
cd0c24f9
AF
1894 return cpu;
1895}
4acb54ba 1896
cd0c24f9
AF
1897void mb_tcg_init(void)
1898{
1899 int i;
4acb54ba
EI
1900
1901 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1902
1903 env_debug = tcg_global_mem_new(TCG_AREG0,
68cee38a 1904 offsetof(CPUMBState, debug),
4acb54ba
EI
1905 "debug0");
1906 env_iflags = tcg_global_mem_new(TCG_AREG0,
68cee38a 1907 offsetof(CPUMBState, iflags),
4acb54ba
EI
1908 "iflags");
1909 env_imm = tcg_global_mem_new(TCG_AREG0,
68cee38a 1910 offsetof(CPUMBState, imm),
4acb54ba
EI
1911 "imm");
1912 env_btarget = tcg_global_mem_new(TCG_AREG0,
68cee38a 1913 offsetof(CPUMBState, btarget),
4acb54ba
EI
1914 "btarget");
1915 env_btaken = tcg_global_mem_new(TCG_AREG0,
68cee38a 1916 offsetof(CPUMBState, btaken),
4acb54ba 1917 "btaken");
4a536270
EI
1918 env_res_addr = tcg_global_mem_new(TCG_AREG0,
1919 offsetof(CPUMBState, res_addr),
1920 "res_addr");
11a76217
EI
1921 env_res_val = tcg_global_mem_new(TCG_AREG0,
1922 offsetof(CPUMBState, res_val),
1923 "res_val");
4acb54ba
EI
1924 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1925 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
68cee38a 1926 offsetof(CPUMBState, regs[i]),
4acb54ba
EI
1927 regnames[i]);
1928 }
1929 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1930 cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
68cee38a 1931 offsetof(CPUMBState, sregs[i]),
4acb54ba
EI
1932 special_regnames[i]);
1933 }
4acb54ba
EI
1934}
1935
68cee38a 1936void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
4acb54ba 1937{
25983cad 1938 env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
4acb54ba 1939}