]> git.proxmox.com Git - mirror_qemu.git/blame - target/microblaze/translate.c
target-microblaze: Introduce a use-div property
[mirror_qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
4acb54ba 25#include "tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
4acb54ba 27#include "microblaze-decode.h"
f08b6170 28#include "exec/cpu_ldst.h"
2ef6175a 29#include "exec/helper-gen.h"
4acb54ba 30
a7e30d84 31#include "trace-tcg.h"
508127e2 32#include "exec/log.h"
a7e30d84
LV
33
34
4acb54ba
EI
35#define SIM_COMPAT 0
36#define DISAS_GNU 1
37#define DISAS_MB 1
38#if DISAS_MB && !SIM_COMPAT
39# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40#else
41# define LOG_DIS(...) do { } while (0)
42#endif
43
44#define D(x)
45
46#define EXTRACT_FIELD(src, start, end) \
47 (((src) >> start) & ((1 << (end - start + 1)) - 1))
48
49static TCGv env_debug;
1bcea73e 50static TCGv_env cpu_env;
4acb54ba
EI
51static TCGv cpu_R[32];
52static TCGv cpu_SR[18];
53static TCGv env_imm;
54static TCGv env_btaken;
55static TCGv env_btarget;
56static TCGv env_iflags;
4a536270 57static TCGv env_res_addr;
11a76217 58static TCGv env_res_val;
4acb54ba 59
022c62cb 60#include "exec/gen-icount.h"
4acb54ba
EI
61
62/* This is the state at translation time. */
63typedef struct DisasContext {
0063ebd6 64 MicroBlazeCPU *cpu;
a5efa644 65 target_ulong pc;
4acb54ba
EI
66
67 /* Decoder. */
68 int type_b;
69 uint32_t ir;
70 uint8_t opcode;
71 uint8_t rd, ra, rb;
72 uint16_t imm;
73
74 unsigned int cpustate_changed;
75 unsigned int delayed_branch;
76 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
77 unsigned int clear_imm;
78 int is_jmp;
79
844bab60
EI
80#define JMP_NOJMP 0
81#define JMP_DIRECT 1
82#define JMP_DIRECT_CC 2
83#define JMP_INDIRECT 3
4acb54ba
EI
84 unsigned int jmp;
85 uint32_t jmp_pc;
86
87 int abort_at_next_insn;
88 int nr_nops;
89 struct TranslationBlock *tb;
90 int singlestep_enabled;
91} DisasContext;
92
/* Printable names for the 32 general-purpose registers (debug dumps).  */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
100
/* Printable names for the special registers backing cpu_SR[] (debug dumps).  */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
107
4acb54ba
EI
/* Flush the translation-time copy of the iflags into the runtime
 * env_iflags global, but only when they have actually diverged, so
 * straight-line code does not pay for a redundant movi per insn.  */
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
116
/* Emit code that raises exception `index` at the current guest PC.
 * Flags are synced and SR_PC is updated first so the exception helper
 * observes consistent state; the TB is then ended (DISAS_UPDATE).  */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
127
90aa39a1
SF
128static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
129{
130#ifndef CONFIG_USER_ONLY
131 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
132#else
133 return true;
134#endif
135}
136
4acb54ba
EI
/* End the TB with a jump to `dest`, using goto_tb slot `n` for direct
 * chaining when allowed, otherwise a plain exit with SR_PC updated.  */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        /* Encode the TB pointer + slot so the chained jump can be patched. */
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
148
ee8b246f
EI
/* Copy the carry flag (MSR bit 31, the CC shadow copy) into d as 0/1.  */
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
153
04ec7df7
EI
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    /* Replicate bit 0 of v across the whole word, then keep only the
       two carry bit positions (MSR_C and its CC shadow).  */
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    /* Clear old carry bits and install the new ones.  */
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
169
65ab5eb4 170static void write_carryi(DisasContext *dc, bool carry)
8cc9b43f
PC
171{
172 TCGv t0 = tcg_temp_new();
65ab5eb4 173 tcg_gen_movi_tl(t0, carry);
8cc9b43f
PC
174 write_carry(dc, t0);
175 tcg_temp_free(t0);
176}
177
61204ce8
EI
178/* True if ALU operand b is a small immediate that may deserve
179 faster treatment. */
180static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
181{
182 /* Immediate insn without the imm prefix ? */
183 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
184}
185
4acb54ba
EI
/* Return a pointer to the TCGv holding ALU operand b: either rb, or
 * the (possibly imm-extended) sign-extended 16-bit immediate staged
 * into the env_imm global.  */
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            /* A preceding imm insn supplied the high half; OR in the low.  */
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            /* Standalone immediate: sign-extend from 16 bits.  */
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
197
/* Decode add/addc/addk/addkc (and immediate forms).  k suppresses the
 * MSR carry update; c adds the incoming carry into the result.  */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        /* Compute the new carry via the helper BEFORE clobbering rd,
           since rd may alias ra/rb.  */
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: result is discarded, only the carry flag is updated.  */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}
251
/* Decode rsub variants and the overlaid cmp/cmpu insns.
 * Note MicroBlaze rsub computes b - a (reverse subtract): d = b + ~a + c,
 * with the borrow chain carried through MSR[C].  */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    /* cmp/cmpu are encoded as register-form rsubk with imm bit 0 set.  */
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        /* Plain rsub: borrow-style subtract, carry-in defaults to 1.  */
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        /* New carry computed before rd is written, as rd may alias ra/rb.  */
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: only the carry flag is updated.  */
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
322
/* Decode the pattern-compare insns pcmpbf/pcmpeq/pcmpne.
 * Raises an illegal-opcode exception when the core is configured
 * without the pcmp instructions and EE-style checking is enabled.  */
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        /* NOTE(review): unlike dec_mul, decoding continues after raising
           the exception here — presumably harmless since the TB ends,
           but worth confirming against the other decoders.  */
    }

    mode = dc->opcode & 3;
    switch (mode) {
    case 0:
        /* pcmpbf. */
        LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd)
            gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        /* pcmpeq: rd = (ra == rb).  */
        LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    case 3:
        /* pcmpne: rd = (ra != rb).  */
        LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    default:
        cpu_abort(CPU(dc->cpu),
                  "unsupported pattern insn opcode=%x\n", dc->opcode);
        break;
    }
}
362
/* Decode and/andn (and immediate forms); register form with imm bit 10
 * set is actually a pattern-compare insn and is re-dispatched.  */
static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    /* rd == r0 makes the insn a nop.  */
    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
383
/* Decode or/ori; register form with imm bit 10 set is a pattern insn.  */
static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
395
/* Decode xor/xori; register form with imm bit 10 set is a pattern insn.  */
static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
407
4acb54ba
EI
/* Copy the current MSR value into d.  */
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
412
413static inline void msr_write(DisasContext *dc, TCGv v)
414{
97b833c5
EI
415 TCGv t;
416
417 t = tcg_temp_new();
4acb54ba 418 dc->cpustate_changed = 1;
97b833c5 419 /* PVR bit is not writable. */
8a84fc6b
EI
420 tcg_gen_andi_tl(t, v, ~MSR_PVR);
421 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
97b833c5
EI
422 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
423 tcg_temp_free(t);
4acb54ba
EI
424}
425
/* Decode the MSR access insns: msrclr/msrset and mts/mfs, including
 * the MMU register window (sr 0x1000..0x10ff) and PVR reads.  */
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);     /* special register number */
    to = dc->imm & (1 << 14);           /* set: mts (write) direction */
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        /* User mode may only touch the C bit (imm 0 or 4); anything else
           is a privileged-insn exception when EE checking is on.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        /* rd receives the pre-modification MSR value.  */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        /* MSR changed: end the TB at the next insn.  */
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        /* mts is privileged.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
        case 0:
            /* Writes to rpc are ignored.  */
            break;
        case 1:
            msr_write(dc, cpu_R[dc->ra]);
            break;
        case 0x3:
            tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
            break;
        case 0x5:
            tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
            break;
        case 0x7:
            /* Only the low 5 FSR bits are writable.  */
            tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
            break;
        case 0x800:
            tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
            break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
        case 0:
            /* mfs rd, rpc reads the current insn address.  */
            tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
            break;
        case 1:
            msr_read(dc, cpu_R[dc->rd]);
            break;
        case 0x3:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
            break;
        case 0x5:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
            break;
        case 0x7:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
            break;
        case 0xb:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
            break;
        case 0x800:
            tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
            break;
        /* PVR registers 0..12.  */
        case 0x2000:
        case 0x2001:
        case 0x2002:
        case 0x2003:
        case 0x2004:
        case 0x2005:
        case 0x2006:
        case 0x2007:
        case 0x2008:
        case 0x2009:
        case 0x200a:
        case 0x200b:
        case 0x200c:
            rn = sr & 0xf;
            tcg_gen_ld_tl(cpu_R[dc->rd],
                          cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
            break;
        default:
            cpu_abort(cs, "unknown mfs reg %x\n", sr);
            break;
        }
    }

    /* r0 must stay zero even if it was named as destination above.  */
    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}
583
4acb54ba
EI
/* Multiplier unit.  */
/* Decode mul/muli and the 64-bit-high variants mulh/mulhsu/mulhu.
 * Raises illegal-opcode when the core has no hardware multiplier.  */
static void dec_mul(DisasContext *dc)
{
    TCGv tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    /* tmp receives the unused low half for the *2_tl variants.  */
    tmp = tcg_temp_new();
    switch (subcode) {
    case 0:
        LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 1:
        /* mulh: high 32 bits of signed x signed.  */
        LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        /* mulhsu: high 32 bits of signed x unsigned.  */
        LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 3:
        /* mulhu: high 32 bits of unsigned x unsigned.  */
        LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
        break;
    }
    tcg_temp_free(tmp);
}
636
/* Div unit.  */
/* Decode idiv/idivu.  Division itself (including divide-by-zero
 * handling) is done in the divs/divu helpers.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;    /* unsigned variant */
    LOG_DIS("div\n");

    /* NOTE(review): unlike dec_mul/dec_barrel this check does not also
       test MSR_EE_FLAG, and decoding continues after raising — confirm
       whether that asymmetry is intended.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !dc->cpu->cfg.use_div) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    /* Keep r0 pinned to zero if it was the destination.  */
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}
660
661static void dec_barrel(DisasContext *dc)
662{
663 TCGv t0;
664 unsigned int s, t;
665
1567a005 666 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 667 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
7faa66aa 668 && !dc->cpu->cfg.use_barrel) {
1567a005
EI
669 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
670 t_gen_raise_exception(dc, EXCP_HW_EXCP);
671 return;
672 }
673
4acb54ba
EI
674 s = dc->imm & (1 << 10);
675 t = dc->imm & (1 << 9);
676
677 LOG_DIS("bs%s%s r%d r%d r%d\n",
678 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
679
680 t0 = tcg_temp_new();
681
682 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
683 tcg_gen_andi_tl(t0, t0, 31);
684
685 if (s)
686 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
687 else {
688 if (t)
689 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
690 else
691 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
692 }
693}
694
/* Decode the "bit" group: src/srl/sra single-bit shifts, sign
 * extensions, cache ops (wdc/wic), clz, and byte/halfword swaps.  */
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
    case 0x21:
        /* src. */
        t0 = tcg_temp_new();

        LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
        /* Capture the old carry (as the CC bit, i.e. bit 31) BEFORE
           write_carry replaces it with ra's LSB.  */
        tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            /* Old carry rotates in at bit 31.  */
            tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
        }
        tcg_temp_free(t0);
        break;

    case 0x1:
    case 0x41:
        /* srl. */
        LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

        /* Update carry. Note that write carry only looks at the LSB.  */
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            if (op == 0x41)
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            else
                tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
        }
        break;
    case 0x60:
        LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x61:
        LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x64:
    case 0x66:
    case 0x74:
    case 0x76:
        /* wdc. */
        LOG_DIS("wdc r%d\n", dc->ra);
        /* Cache ops are privileged; the op itself is a nop in TCG.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
        break;
    case 0x68:
        /* wic. */
        LOG_DIS("wic r%d\n", dc->ra);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
        break;
    case 0xe0:
        /* clz: gated on the pcmp configuration bit.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
            && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
            && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
            /* clz of 0 yields 32.  */
            tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
        }
        break;
    case 0x1e0:
        /* swapb */
        LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x1e2:
        /*swaph */
        LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
        /* Swapping the two halfwords == rotate by 16.  */
        tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
        break;
    default:
        cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                  dc->pc, op, dc->rd, dc->ra, dc->rb);
        break;
    }
}
790
/* Materialize a pending direct branch into the runtime globals
 * (env_btaken/env_btarget) so that a fault taken inside a delay slot
 * sees consistent branch state; the branch becomes JMP_INDIRECT.  */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            /* Unconditional direct branches are always taken.  */
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
801
802static void dec_imm(DisasContext *dc)
803{
804 LOG_DIS("imm %x\n", dc->imm << 16);
805 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
806 dc->tb_flags |= IMM_FLAG;
807 dc->clear_imm = 0;
808}
809
4acb54ba
EI
/* Compute the effective address for a load/store.  Returns either a
 * pointer to an existing register TCGv (no temp allocated) or to *t,
 * in which case the caller must free t.  Also emits the stack
 * protection check when r1 participates and stackprot is configured.  */
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            /* Zero offset: the base register is the address.  */
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        /* imm-extended: dec_alu_op_b combines the halves in env_imm.  */
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
860
/* Decode loads: lbu/lhu/lw and the reversed (r) and exclusive (lwx)
 * variants.  The load is performed speculatively before the alignment
 * check because MMU faults take priority over unaligned faults.  */
static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                /* addr = (addr & ~3) | (3 - (addr & 3))  */
                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        /* Record the reservation address and value for a later swx.  */
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}
989
4acb54ba
EI
/* Decode stores: sb/sh/sw and the reversed (r) and conditional-
 * exclusive (swx) variants.  swx only performs the store when both the
 * reserved address and the reserved value still match; MSR[C] reports
 * failure (1) or success (0).  */
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    /* Local temp: must survive the brcond below.  */
    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        /* Assume failure (C=1) until both checks pass.  */
        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                /* addr = (addr & ~3) | (3 - (addr & 3))  */
                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, thay way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}
1116
1117static inline void eval_cc(DisasContext *dc, unsigned int cc,
1118 TCGv d, TCGv a, TCGv b)
1119{
4acb54ba
EI
1120 switch (cc) {
1121 case CC_EQ:
b2565c69 1122 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
4acb54ba
EI
1123 break;
1124 case CC_NE:
b2565c69 1125 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
4acb54ba
EI
1126 break;
1127 case CC_LT:
b2565c69 1128 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
4acb54ba
EI
1129 break;
1130 case CC_LE:
b2565c69 1131 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
4acb54ba
EI
1132 break;
1133 case CC_GE:
b2565c69 1134 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
4acb54ba
EI
1135 break;
1136 case CC_GT:
b2565c69 1137 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
4acb54ba
EI
1138 break;
1139 default:
0063ebd6 1140 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
4acb54ba
EI
1141 break;
1142 }
1143}
1144
/*
 * Resolve a delay-slot branch: set SR_PC to pc_true when the branch
 * was taken (env_btaken != 0), otherwise to pc_false.
 */
static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    /* Write the fall-through target first, then overwrite it with the
       branch target unless env_btaken is zero.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
1154
/*
 * Decode conditional branches (beq/bne/blt/ble/bge/bgt and their
 * delay-slot "d" variants).  Computes the branch target and latches
 * the condition result into env_btaken for eval_cond_jmp().
 */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        /* Delay-slot form: the branch resolves after the next insn.  */
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Record whether an imm insn prefixes the delay slot.  */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        /* Target is known at translation time; allow direct chaining.  */
        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    /* Branch taken iff ra <cc> 0.  */
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
1185
1186static void dec_br(DisasContext *dc)
1187{
9f6113c7 1188 unsigned int dslot, link, abs, mbar;
97ed5ccd 1189 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
4acb54ba
EI
1190
1191 dslot = dc->ir & (1 << 20);
1192 abs = dc->ir & (1 << 19);
1193 link = dc->ir & (1 << 18);
9f6113c7
EI
1194
1195 /* Memory barrier. */
1196 mbar = (dc->ir >> 16) & 31;
1197 if (mbar == 2 && dc->imm == 4) {
5d45de97
EI
1198 /* mbar IMM & 16 decodes to sleep. */
1199 if (dc->rd & 16) {
1200 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1201 TCGv_i32 tmp_1 = tcg_const_i32(1);
1202
1203 LOG_DIS("sleep\n");
1204
1205 t_sync_flags(dc);
1206 tcg_gen_st_i32(tmp_1, cpu_env,
1207 -offsetof(MicroBlazeCPU, env)
1208 +offsetof(CPUState, halted));
1209 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
1210 gen_helper_raise_exception(cpu_env, tmp_hlt);
1211 tcg_temp_free_i32(tmp_hlt);
1212 tcg_temp_free_i32(tmp_1);
1213 return;
1214 }
9f6113c7
EI
1215 LOG_DIS("mbar %d\n", dc->rd);
1216 /* Break the TB. */
1217 dc->cpustate_changed = 1;
1218 return;
1219 }
1220
4acb54ba
EI
1221 LOG_DIS("br%s%s%s%s imm=%x\n",
1222 abs ? "a" : "", link ? "l" : "",
1223 dc->type_b ? "i" : "", dslot ? "d" : "",
1224 dc->imm);
1225
1226 dc->delayed_branch = 1;
1227 if (dslot) {
1228 dc->delayed_branch = 2;
1229 dc->tb_flags |= D_FLAG;
1230 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1231 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1232 }
1233 if (link && dc->rd)
1234 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
1235
1236 dc->jmp = JMP_INDIRECT;
1237 if (abs) {
1238 tcg_gen_movi_tl(env_btaken, 1);
1239 tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
ff21f70a
EI
1240 if (link && !dslot) {
1241 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1242 t_gen_raise_exception(dc, EXCP_BREAK);
1243 if (dc->imm == 0) {
1244 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
1245 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1246 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1247 return;
1248 }
1249
1250 t_gen_raise_exception(dc, EXCP_DEBUG);
1251 }
1252 }
4acb54ba 1253 } else {
61204ce8
EI
1254 if (dec_alu_op_b_is_small_imm(dc)) {
1255 dc->jmp = JMP_DIRECT;
1256 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1257 } else {
4acb54ba
EI
1258 tcg_gen_movi_tl(env_btaken, 1);
1259 tcg_gen_movi_tl(env_btarget, dc->pc);
1260 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
4acb54ba
EI
1261 }
1262 }
1263}
1264
1265static inline void do_rti(DisasContext *dc)
1266{
1267 TCGv t0, t1;
1268 t0 = tcg_temp_new();
1269 t1 = tcg_temp_new();
1270 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1271 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1272 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1273
1274 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1275 tcg_gen_or_tl(t1, t1, t0);
1276 msr_write(dc, t1);
1277 tcg_temp_free(t1);
1278 tcg_temp_free(t0);
1279 dc->tb_flags &= ~DRTI_FLAG;
1280}
1281
1282static inline void do_rtb(DisasContext *dc)
1283{
1284 TCGv t0, t1;
1285 t0 = tcg_temp_new();
1286 t1 = tcg_temp_new();
1287 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1288 tcg_gen_shri_tl(t0, t1, 1);
1289 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1290
1291 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1292 tcg_gen_or_tl(t1, t1, t0);
1293 msr_write(dc, t1);
1294 tcg_temp_free(t1);
1295 tcg_temp_free(t0);
1296 dc->tb_flags &= ~DRTB_FLAG;
1297}
1298
1299static inline void do_rte(DisasContext *dc)
1300{
1301 TCGv t0, t1;
1302 t0 = tcg_temp_new();
1303 t1 = tcg_temp_new();
1304
1305 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1306 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1307 tcg_gen_shri_tl(t0, t1, 1);
1308 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1309
1310 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1311 tcg_gen_or_tl(t1, t1, t0);
1312 msr_write(dc, t1);
1313 tcg_temp_free(t1);
1314 tcg_temp_free(t0);
1315 dc->tb_flags &= ~DRTE_FLAG;
1316}
1317
1318static void dec_rts(DisasContext *dc)
1319{
1320 unsigned int b_bit, i_bit, e_bit;
97ed5ccd 1321 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
4acb54ba
EI
1322
1323 i_bit = dc->ir & (1 << 21);
1324 b_bit = dc->ir & (1 << 22);
1325 e_bit = dc->ir & (1 << 23);
1326
1327 dc->delayed_branch = 2;
1328 dc->tb_flags |= D_FLAG;
1329 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1330 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1331
1332 if (i_bit) {
1333 LOG_DIS("rtid ir=%x\n", dc->ir);
1567a005
EI
1334 if ((dc->tb_flags & MSR_EE_FLAG)
1335 && mem_index == MMU_USER_IDX) {
1336 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1337 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1338 }
4acb54ba
EI
1339 dc->tb_flags |= DRTI_FLAG;
1340 } else if (b_bit) {
1341 LOG_DIS("rtbd ir=%x\n", dc->ir);
1567a005
EI
1342 if ((dc->tb_flags & MSR_EE_FLAG)
1343 && mem_index == MMU_USER_IDX) {
1344 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1345 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1346 }
4acb54ba
EI
1347 dc->tb_flags |= DRTB_FLAG;
1348 } else if (e_bit) {
1349 LOG_DIS("rted ir=%x\n", dc->ir);
1567a005
EI
1350 if ((dc->tb_flags & MSR_EE_FLAG)
1351 && mem_index == MMU_USER_IDX) {
1352 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1353 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1354 }
4acb54ba
EI
1355 dc->tb_flags |= DRTE_FLAG;
1356 } else
1357 LOG_DIS("rts ir=%x\n", dc->ir);
1358
23979dc5 1359 dc->jmp = JMP_INDIRECT;
4acb54ba
EI
1360 tcg_gen_movi_tl(env_btaken, 1);
1361 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
1362}
1363
97694c57
EI
1364static int dec_check_fpuv2(DisasContext *dc)
1365{
be67e9ab 1366 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
97694c57
EI
1367 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1368 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1369 }
be67e9ab 1370 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
97694c57
EI
1371}
1372
1567a005
EI
/*
 * Decode floating point insns.  Raises an illegal-opcode exception when
 * no FPU is configured (cfg.use_fpu != 1) and the illegal-opcode
 * exception is enabled; flt/fint/fsqrt additionally require an FPU v2
 * (dec_check_fpuv2).
 */
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    /* Sub-opcode selecting the FPU operation.  */
    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
    case 0:
        /* fadd */
        gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 1:
        /* frsub */
        gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                         cpu_R[dc->rb]);
        break;

    case 2:
        /* fmul */
        gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 3:
        /* fdiv */
        gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 4:
        /* fcmp family; comparison selected by bits 4..6.  */
        switch ((dc->ir >> 4) & 7) {
        case 0:
            gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 4:
            gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 5:
            gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 6:
            gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            qemu_log_mask(LOG_UNIMP,
                          "unimplemented fcmp fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
        }
        break;

    case 5:
        /* flt: int -> float conversion, FPU v2 only.  */
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    case 6:
        /* fint: float -> int conversion, FPU v2 only.  */
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    case 7:
        /* fsqrt, FPU v2 only.  */
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    default:
        qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                      " opc=%x\n",
                      fpu_insn, dc->pc, dc->opcode);
        dc->abort_at_next_insn = 1;
        break;
    }
}
1477
4acb54ba
EI
/*
 * Catch-all decoder for unrecognized opcodes: raise an illegal-opcode
 * hw exception when configured, otherwise log the guest error and flag
 * the translator to abort at the next insn.
 */
static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}
1489
6d76d23e
EI
/* Insns connected to FSL or AXI stream attached devices.  */
/*
 * get/put (and dynamic getd/putd) move data between a register and a
 * stream port.  rd == 0 selects put (write cpu_R[ra] to the port),
 * otherwise get (read the port into cpu_R[rd]).  Privileged insn.
 */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    /* Not allowed from user mode when hw exceptions are enabled.  */
    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        /* Static form: port id and control bits come from the imm.  */
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        /* Dynamic form: port id comes from rb at runtime.  */
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}
1525
4acb54ba
EI
/*
 * Opcode dispatch table, scanned in order by decode(): the first entry
 * whose (opcode & mask) == bits wins.  The DEC_* macros expand to a
 * bits/mask pair.  The final {0, 0} entry matches everything and routes
 * unknown opcodes to dec_null.
 */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1553
/*
 * Decode and translate one instruction word.  Splits ir into the
 * common fields, tracks runs of zero words (nops / zeroed memory) and
 * dispatches to the matching entry in decinfo[].
 */
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        /* Opcode 0 can be configured to be illegal.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        /* A long run of zero words means we are fetching garbage.  */
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
1595
4acb54ba 1596/* generate intermediate code for basic block 'tb'. */
/*
 * Translate a basic block of guest code starting at tb->pc into TCG
 * ops.  Stops at TB-breaking insns, page boundaries, breakpoints,
 * singlestep requests, op-buffer exhaustion or the insn budget, then
 * emits the TB epilogue (goto_tb chaining or exit_tb).
 */
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    /* Remember the incoming flags so we can detect changes below.  */
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* A TB may start inside a delay slot (D_FLAG carried in tb->flags).  */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        /* dec_imm leaves clear_imm unset so the imm prefix survives
           into the next insn.  */
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* The delay slot has completed; apply any pending
                   MSR restore and resolve the branch.  */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* TB ended inside a delay slot; force a state sync.  */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1786
878096ee
AF
/*
 * Dump the MicroBlaze CPU state (PC, special registers, MSR mode bits
 * and the 32 general-purpose registers, four per line) to f.
 * The flags argument is currently unused.
 */
void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    /* Defensive; env is derived from cs above and f from the caller.  */
    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}
1816
b33ab1f7 1817MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
4acb54ba 1818{
b77f98ca 1819 MicroBlazeCPU *cpu;
4acb54ba 1820
b77f98ca 1821 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
4acb54ba 1822
746b03b2 1823 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
4acb54ba 1824
cd0c24f9
AF
1825 return cpu;
1826}
4acb54ba 1827
cd0c24f9
AF
/*
 * Register the TCG globals backing CPUMBState fields: env itself, the
 * helper/branch state (debug, iflags, imm, btarget, btaken), the lwx/swx
 * reservation (res_addr/res_val), the 32 GPRs and the special regs.
 */
void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    env_debug = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUMBState, debug),
                                   "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                                    offsetof(CPUMBState, iflags),
                                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUMBState, imm),
                                 "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUMBState, btarget),
                                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                                    offsetof(CPUMBState, btaken),
                                    "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUMBState, res_addr),
                                      "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUMBState, res_val),
                                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUMBState, regs[i]),
                                      regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUMBState, sregs[i]),
                                       special_regnames[i]);
    }
}
1867
bad729e2
RH
/*
 * Restore CPU state after an exception mid-TB.  data[0] is the guest
 * PC recorded by tcg_gen_insn_start() in gen_intermediate_code().
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}