]> git.proxmox.com Git - qemu.git/blame - target-microblaze/translate.c
smc91c111: Fix receive starvation
[qemu.git] / target-microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
4acb54ba 21#include "cpu.h"
76cad711 22#include "disas/disas.h"
4acb54ba
EI
23#include "tcg-op.h"
24#include "helper.h"
25#include "microblaze-decode.h"
4acb54ba
EI
26
27#define GEN_HELPER 1
28#include "helper.h"
29
30#define SIM_COMPAT 0
31#define DISAS_GNU 1
32#define DISAS_MB 1
33#if DISAS_MB && !SIM_COMPAT
34# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
35#else
36# define LOG_DIS(...) do { } while (0)
37#endif
38
39#define D(x)
40
41#define EXTRACT_FIELD(src, start, end) \
42 (((src) >> start) & ((1 << (end - start + 1)) - 1))
43
44static TCGv env_debug;
45static TCGv_ptr cpu_env;
46static TCGv cpu_R[32];
47static TCGv cpu_SR[18];
48static TCGv env_imm;
49static TCGv env_btaken;
50static TCGv env_btarget;
51static TCGv env_iflags;
4a536270 52static TCGv env_res_addr;
11a76217 53static TCGv env_res_val;
4acb54ba 54
022c62cb 55#include "exec/gen-icount.h"
4acb54ba
EI
56
57/* This is the state at translation time. */
58typedef struct DisasContext {
68cee38a 59 CPUMBState *env;
a5efa644 60 target_ulong pc;
4acb54ba
EI
61
62 /* Decoder. */
63 int type_b;
64 uint32_t ir;
65 uint8_t opcode;
66 uint8_t rd, ra, rb;
67 uint16_t imm;
68
69 unsigned int cpustate_changed;
70 unsigned int delayed_branch;
71 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
72 unsigned int clear_imm;
73 int is_jmp;
74
844bab60
EI
75#define JMP_NOJMP 0
76#define JMP_DIRECT 1
77#define JMP_DIRECT_CC 2
78#define JMP_INDIRECT 3
4acb54ba
EI
79 unsigned int jmp;
80 uint32_t jmp_pc;
81
82 int abort_at_next_insn;
83 int nr_nops;
84 struct TranslationBlock *tb;
85 int singlestep_enabled;
86} DisasContext;
87
/* Printable names of the 32 general-purpose registers (for tracing).  */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

/* Printable names of the special registers (for tracing).  */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
102
/* Sign extend at translation time: VAL's sign bit sits at bit
   position WIDTH; bits above it are discarded.  */
static inline int sign_extend(unsigned int val, unsigned int width)
{
    int sval;

    /* Logical shift left puts the sign bit at bit 31...  */
    sval = val << (31 - width);
    /* ...and the arithmetic shift right replicates it downwards.  */
    sval >>= 31 - width;
    return sval;
}
115
116static inline void t_sync_flags(DisasContext *dc)
117{
4abf79a4 118 /* Synch the tb dependent flags between translator and runtime. */
4acb54ba
EI
119 if (dc->tb_flags != dc->synced_flags) {
120 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
121 dc->synced_flags = dc->tb_flags;
122 }
123}
124
125static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
126{
127 TCGv_i32 tmp = tcg_const_i32(index);
128
129 t_sync_flags(dc);
130 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
64254eba 131 gen_helper_raise_exception(cpu_env, tmp);
4acb54ba
EI
132 tcg_temp_free_i32(tmp);
133 dc->is_jmp = DISAS_UPDATE;
134}
135
136static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
137{
138 TranslationBlock *tb;
139 tb = dc->tb;
140 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
141 tcg_gen_goto_tb(n);
142 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
8cfd0495 143 tcg_gen_exit_tb((uintptr_t)tb + n);
4acb54ba
EI
144 } else {
145 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
146 tcg_gen_exit_tb(0);
147 }
148}
149
ee8b246f
EI
150static void read_carry(DisasContext *dc, TCGv d)
151{
152 tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
153}
154
04ec7df7
EI
155/*
156 * write_carry sets the carry bits in MSR based on bit 0 of v.
157 * v[31:1] are ignored.
158 */
ee8b246f
EI
159static void write_carry(DisasContext *dc, TCGv v)
160{
161 TCGv t0 = tcg_temp_new();
162 tcg_gen_shli_tl(t0, v, 31);
163 tcg_gen_sari_tl(t0, t0, 31);
164 tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
165 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
166 ~(MSR_C | MSR_CC));
167 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
168 tcg_temp_free(t0);
169}
170
65ab5eb4 171static void write_carryi(DisasContext *dc, bool carry)
8cc9b43f
PC
172{
173 TCGv t0 = tcg_temp_new();
65ab5eb4 174 tcg_gen_movi_tl(t0, carry);
8cc9b43f
PC
175 write_carry(dc, t0);
176 tcg_temp_free(t0);
177}
178
61204ce8
EI
179/* True if ALU operand b is a small immediate that may deserve
180 faster treatment. */
181static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
182{
183 /* Immediate insn without the imm prefix ? */
184 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
185}
186
4acb54ba
EI
187static inline TCGv *dec_alu_op_b(DisasContext *dc)
188{
189 if (dc->type_b) {
190 if (dc->tb_flags & IMM_FLAG)
191 tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
192 else
193 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
194 return &env_imm;
195 } else
196 return &cpu_R[dc->rb];
197}
198
199static void dec_add(DisasContext *dc)
200{
201 unsigned int k, c;
40cbf5b7 202 TCGv cf;
4acb54ba
EI
203
204 k = dc->opcode & 4;
205 c = dc->opcode & 2;
206
207 LOG_DIS("add%s%s%s r%d r%d r%d\n",
208 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
209 dc->rd, dc->ra, dc->rb);
210
40cbf5b7
EI
211 /* Take care of the easy cases first. */
212 if (k) {
213 /* k - keep carry, no need to update MSR. */
214 /* If rd == r0, it's a nop. */
215 if (dc->rd) {
216 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
217
218 if (c) {
219 /* c - Add carry into the result. */
220 cf = tcg_temp_new();
221
222 read_carry(dc, cf);
223 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
224 tcg_temp_free(cf);
225 }
226 }
227 return;
228 }
229
230 /* From now on, we can assume k is zero. So we need to update MSR. */
231 /* Extract carry. */
232 cf = tcg_temp_new();
233 if (c) {
234 read_carry(dc, cf);
235 } else {
236 tcg_gen_movi_tl(cf, 0);
237 }
238
239 if (dc->rd) {
240 TCGv ncf = tcg_temp_new();
5d0bb823 241 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
4acb54ba 242 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
40cbf5b7
EI
243 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
244 write_carry(dc, ncf);
245 tcg_temp_free(ncf);
246 } else {
5d0bb823 247 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
40cbf5b7 248 write_carry(dc, cf);
4acb54ba 249 }
40cbf5b7 250 tcg_temp_free(cf);
4acb54ba
EI
251}
252
253static void dec_sub(DisasContext *dc)
254{
255 unsigned int u, cmp, k, c;
e0a42ebc 256 TCGv cf, na;
4acb54ba
EI
257
258 u = dc->imm & 2;
259 k = dc->opcode & 4;
260 c = dc->opcode & 2;
261 cmp = (dc->imm & 1) && (!dc->type_b) && k;
262
263 if (cmp) {
264 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
265 if (dc->rd) {
266 if (u)
267 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
268 else
269 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
270 }
e0a42ebc
EI
271 return;
272 }
273
274 LOG_DIS("sub%s%s r%d, r%d r%d\n",
275 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
276
277 /* Take care of the easy cases first. */
278 if (k) {
279 /* k - keep carry, no need to update MSR. */
280 /* If rd == r0, it's a nop. */
281 if (dc->rd) {
4acb54ba 282 tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
e0a42ebc
EI
283
284 if (c) {
285 /* c - Add carry into the result. */
286 cf = tcg_temp_new();
287
288 read_carry(dc, cf);
289 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
290 tcg_temp_free(cf);
291 }
292 }
293 return;
294 }
295
296 /* From now on, we can assume k is zero. So we need to update MSR. */
297 /* Extract carry. And complement a into na. */
298 cf = tcg_temp_new();
299 na = tcg_temp_new();
300 if (c) {
301 read_carry(dc, cf);
302 } else {
303 tcg_gen_movi_tl(cf, 1);
304 }
305
306 /* d = b + ~a + c. carry defaults to 1. */
307 tcg_gen_not_tl(na, cpu_R[dc->ra]);
308
309 if (dc->rd) {
310 TCGv ncf = tcg_temp_new();
5d0bb823 311 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
e0a42ebc
EI
312 tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
313 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
314 write_carry(dc, ncf);
315 tcg_temp_free(ncf);
316 } else {
5d0bb823 317 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
e0a42ebc 318 write_carry(dc, cf);
4acb54ba 319 }
e0a42ebc
EI
320 tcg_temp_free(cf);
321 tcg_temp_free(na);
4acb54ba
EI
322}
323
324static void dec_pattern(DisasContext *dc)
325{
326 unsigned int mode;
327 int l1;
328
1567a005 329 if ((dc->tb_flags & MSR_EE_FLAG)
97f90cbf 330 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1567a005
EI
331 && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
332 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
333 t_gen_raise_exception(dc, EXCP_HW_EXCP);
334 }
335
4acb54ba
EI
336 mode = dc->opcode & 3;
337 switch (mode) {
338 case 0:
339 /* pcmpbf. */
340 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
341 if (dc->rd)
342 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
343 break;
344 case 2:
345 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
346 if (dc->rd) {
347 TCGv t0 = tcg_temp_local_new();
348 l1 = gen_new_label();
349 tcg_gen_movi_tl(t0, 1);
350 tcg_gen_brcond_tl(TCG_COND_EQ,
351 cpu_R[dc->ra], cpu_R[dc->rb], l1);
352 tcg_gen_movi_tl(t0, 0);
353 gen_set_label(l1);
354 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
355 tcg_temp_free(t0);
356 }
357 break;
358 case 3:
359 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
360 l1 = gen_new_label();
361 if (dc->rd) {
362 TCGv t0 = tcg_temp_local_new();
363 tcg_gen_movi_tl(t0, 1);
364 tcg_gen_brcond_tl(TCG_COND_NE,
365 cpu_R[dc->ra], cpu_R[dc->rb], l1);
366 tcg_gen_movi_tl(t0, 0);
367 gen_set_label(l1);
368 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
369 tcg_temp_free(t0);
370 }
371 break;
372 default:
373 cpu_abort(dc->env,
374 "unsupported pattern insn opcode=%x\n", dc->opcode);
375 break;
376 }
377}
378
379static void dec_and(DisasContext *dc)
380{
381 unsigned int not;
382
383 if (!dc->type_b && (dc->imm & (1 << 10))) {
384 dec_pattern(dc);
385 return;
386 }
387
388 not = dc->opcode & (1 << 1);
389 LOG_DIS("and%s\n", not ? "n" : "");
390
391 if (!dc->rd)
392 return;
393
394 if (not) {
a235900e 395 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
396 } else
397 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
398}
399
400static void dec_or(DisasContext *dc)
401{
402 if (!dc->type_b && (dc->imm & (1 << 10))) {
403 dec_pattern(dc);
404 return;
405 }
406
407 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
408 if (dc->rd)
409 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
410}
411
412static void dec_xor(DisasContext *dc)
413{
414 if (!dc->type_b && (dc->imm & (1 << 10))) {
415 dec_pattern(dc);
416 return;
417 }
418
419 LOG_DIS("xor r%d\n", dc->rd);
420 if (dc->rd)
421 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
422}
423
4acb54ba
EI
424static inline void msr_read(DisasContext *dc, TCGv d)
425{
426 tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
427}
428
429static inline void msr_write(DisasContext *dc, TCGv v)
430{
97b833c5
EI
431 TCGv t;
432
433 t = tcg_temp_new();
4acb54ba 434 dc->cpustate_changed = 1;
97b833c5 435 /* PVR bit is not writable. */
8a84fc6b
EI
436 tcg_gen_andi_tl(t, v, ~MSR_PVR);
437 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
97b833c5
EI
438 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
439 tcg_temp_free(t);
4acb54ba
EI
440}
441
442static void dec_msr(DisasContext *dc)
443{
444 TCGv t0, t1;
445 unsigned int sr, to, rn;
1567a005 446 int mem_index = cpu_mmu_index(dc->env);
4acb54ba
EI
447
448 sr = dc->imm & ((1 << 14) - 1);
449 to = dc->imm & (1 << 14);
450 dc->type_b = 1;
451 if (to)
452 dc->cpustate_changed = 1;
453
454 /* msrclr and msrset. */
455 if (!(dc->imm & (1 << 15))) {
456 unsigned int clr = dc->ir & (1 << 16);
457
458 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
459 dc->rd, dc->imm);
1567a005
EI
460
461 if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
462 /* nop??? */
463 return;
464 }
465
466 if ((dc->tb_flags & MSR_EE_FLAG)
467 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
468 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
469 t_gen_raise_exception(dc, EXCP_HW_EXCP);
470 return;
471 }
472
4acb54ba
EI
473 if (dc->rd)
474 msr_read(dc, cpu_R[dc->rd]);
475
476 t0 = tcg_temp_new();
477 t1 = tcg_temp_new();
478 msr_read(dc, t0);
479 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
480
481 if (clr) {
482 tcg_gen_not_tl(t1, t1);
483 tcg_gen_and_tl(t0, t0, t1);
484 } else
485 tcg_gen_or_tl(t0, t0, t1);
486 msr_write(dc, t0);
487 tcg_temp_free(t0);
488 tcg_temp_free(t1);
489 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
490 dc->is_jmp = DISAS_UPDATE;
491 return;
492 }
493
1567a005
EI
494 if (to) {
495 if ((dc->tb_flags & MSR_EE_FLAG)
496 && mem_index == MMU_USER_IDX) {
497 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
498 t_gen_raise_exception(dc, EXCP_HW_EXCP);
499 return;
500 }
501 }
502
4acb54ba
EI
503#if !defined(CONFIG_USER_ONLY)
504 /* Catch read/writes to the mmu block. */
505 if ((sr & ~0xff) == 0x1000) {
506 sr &= 7;
507 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
508 if (to)
64254eba 509 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
4acb54ba 510 else
64254eba 511 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
4acb54ba
EI
512 return;
513 }
514#endif
515
516 if (to) {
517 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
518 switch (sr) {
519 case 0:
520 break;
521 case 1:
522 msr_write(dc, cpu_R[dc->ra]);
523 break;
524 case 0x3:
525 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
526 break;
527 case 0x5:
528 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
529 break;
530 case 0x7:
97694c57 531 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
4acb54ba 532 break;
5818dee5 533 case 0x800:
68cee38a 534 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
535 break;
536 case 0x802:
68cee38a 537 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
5818dee5 538 break;
4acb54ba
EI
539 default:
540 cpu_abort(dc->env, "unknown mts reg %x\n", sr);
541 break;
542 }
543 } else {
544 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
545
546 switch (sr) {
547 case 0:
548 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
549 break;
550 case 1:
551 msr_read(dc, cpu_R[dc->rd]);
552 break;
553 case 0x3:
554 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
555 break;
556 case 0x5:
557 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
558 break;
559 case 0x7:
97694c57 560 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
4acb54ba
EI
561 break;
562 case 0xb:
563 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
564 break;
5818dee5 565 case 0x800:
68cee38a 566 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
567 break;
568 case 0x802:
68cee38a 569 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
5818dee5 570 break;
4acb54ba
EI
571 case 0x2000:
572 case 0x2001:
573 case 0x2002:
574 case 0x2003:
575 case 0x2004:
576 case 0x2005:
577 case 0x2006:
578 case 0x2007:
579 case 0x2008:
580 case 0x2009:
581 case 0x200a:
582 case 0x200b:
583 case 0x200c:
584 rn = sr & 0xf;
585 tcg_gen_ld_tl(cpu_R[dc->rd],
68cee38a 586 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
4acb54ba
EI
587 break;
588 default:
589 cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
590 break;
591 }
592 }
ee7dbcf8
EI
593
594 if (dc->rd == 0) {
595 tcg_gen_movi_tl(cpu_R[0], 0);
596 }
4acb54ba
EI
597}
598
599/* 64-bit signed mul, lower result in d and upper in d2. */
600static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
601{
602 TCGv_i64 t0, t1;
603
604 t0 = tcg_temp_new_i64();
605 t1 = tcg_temp_new_i64();
606
607 tcg_gen_ext_i32_i64(t0, a);
608 tcg_gen_ext_i32_i64(t1, b);
609 tcg_gen_mul_i64(t0, t0, t1);
610
611 tcg_gen_trunc_i64_i32(d, t0);
612 tcg_gen_shri_i64(t0, t0, 32);
613 tcg_gen_trunc_i64_i32(d2, t0);
614
615 tcg_temp_free_i64(t0);
616 tcg_temp_free_i64(t1);
617}
618
619/* 64-bit unsigned muls, lower result in d and upper in d2. */
620static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
621{
622 TCGv_i64 t0, t1;
623
624 t0 = tcg_temp_new_i64();
625 t1 = tcg_temp_new_i64();
626
627 tcg_gen_extu_i32_i64(t0, a);
628 tcg_gen_extu_i32_i64(t1, b);
629 tcg_gen_mul_i64(t0, t0, t1);
630
631 tcg_gen_trunc_i64_i32(d, t0);
632 tcg_gen_shri_i64(t0, t0, 32);
633 tcg_gen_trunc_i64_i32(d2, t0);
634
635 tcg_temp_free_i64(t0);
636 tcg_temp_free_i64(t1);
637}
638
639/* Multiplier unit. */
640static void dec_mul(DisasContext *dc)
641{
642 TCGv d[2];
643 unsigned int subcode;
644
1567a005 645 if ((dc->tb_flags & MSR_EE_FLAG)
97f90cbf 646 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1567a005
EI
647 && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
648 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
649 t_gen_raise_exception(dc, EXCP_HW_EXCP);
650 return;
651 }
652
4acb54ba
EI
653 subcode = dc->imm & 3;
654 d[0] = tcg_temp_new();
655 d[1] = tcg_temp_new();
656
657 if (dc->type_b) {
658 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
659 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
660 goto done;
661 }
662
1567a005
EI
663 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
664 if (subcode >= 1 && subcode <= 3
665 && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
666 /* nop??? */
667 }
668
4acb54ba
EI
669 switch (subcode) {
670 case 0:
671 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
672 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
673 break;
674 case 1:
675 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
676 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
677 break;
678 case 2:
679 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
680 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
681 break;
682 case 3:
683 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
684 t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
685 break;
686 default:
687 cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
688 break;
689 }
690done:
691 tcg_temp_free(d[0]);
692 tcg_temp_free(d[1]);
693}
694
695/* Div unit. */
696static void dec_div(DisasContext *dc)
697{
698 unsigned int u;
699
700 u = dc->imm & 2;
701 LOG_DIS("div\n");
702
97f90cbf 703 if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1567a005
EI
704 && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
705 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
706 t_gen_raise_exception(dc, EXCP_HW_EXCP);
707 }
708
4acb54ba 709 if (u)
64254eba
BS
710 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
711 cpu_R[dc->ra]);
4acb54ba 712 else
64254eba
BS
713 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
714 cpu_R[dc->ra]);
4acb54ba
EI
715 if (!dc->rd)
716 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
717}
718
719static void dec_barrel(DisasContext *dc)
720{
721 TCGv t0;
722 unsigned int s, t;
723
1567a005 724 if ((dc->tb_flags & MSR_EE_FLAG)
97f90cbf 725 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1567a005
EI
726 && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
727 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
728 t_gen_raise_exception(dc, EXCP_HW_EXCP);
729 return;
730 }
731
4acb54ba
EI
732 s = dc->imm & (1 << 10);
733 t = dc->imm & (1 << 9);
734
735 LOG_DIS("bs%s%s r%d r%d r%d\n",
736 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
737
738 t0 = tcg_temp_new();
739
740 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
741 tcg_gen_andi_tl(t0, t0, 31);
742
743 if (s)
744 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
745 else {
746 if (t)
747 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
748 else
749 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
750 }
751}
752
753static void dec_bit(DisasContext *dc)
754{
09b9f113 755 TCGv t0;
4acb54ba 756 unsigned int op;
1567a005 757 int mem_index = cpu_mmu_index(dc->env);
4acb54ba 758
ace2e4da 759 op = dc->ir & ((1 << 9) - 1);
4acb54ba
EI
760 switch (op) {
761 case 0x21:
762 /* src. */
763 t0 = tcg_temp_new();
764
765 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
09b9f113
EI
766 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
767 write_carry(dc, cpu_R[dc->ra]);
4acb54ba 768 if (dc->rd) {
4acb54ba 769 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
09b9f113 770 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
4acb54ba 771 }
4acb54ba
EI
772 tcg_temp_free(t0);
773 break;
774
775 case 0x1:
776 case 0x41:
777 /* srl. */
4acb54ba
EI
778 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
779
bb3cb951
EI
780 /* Update carry. Note that write carry only looks at the LSB. */
781 write_carry(dc, cpu_R[dc->ra]);
4acb54ba
EI
782 if (dc->rd) {
783 if (op == 0x41)
784 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
785 else
786 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
787 }
788 break;
789 case 0x60:
790 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
791 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
792 break;
793 case 0x61:
794 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
795 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
796 break;
797 case 0x64:
f062a3c7
EI
798 case 0x66:
799 case 0x74:
800 case 0x76:
4acb54ba
EI
801 /* wdc. */
802 LOG_DIS("wdc r%d\n", dc->ra);
1567a005
EI
803 if ((dc->tb_flags & MSR_EE_FLAG)
804 && mem_index == MMU_USER_IDX) {
805 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
806 t_gen_raise_exception(dc, EXCP_HW_EXCP);
807 return;
808 }
4acb54ba
EI
809 break;
810 case 0x68:
811 /* wic. */
812 LOG_DIS("wic r%d\n", dc->ra);
1567a005
EI
813 if ((dc->tb_flags & MSR_EE_FLAG)
814 && mem_index == MMU_USER_IDX) {
815 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
816 t_gen_raise_exception(dc, EXCP_HW_EXCP);
817 return;
818 }
4acb54ba 819 break;
48b5e96f
EI
820 case 0xe0:
821 if ((dc->tb_flags & MSR_EE_FLAG)
822 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
823 && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
824 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
825 t_gen_raise_exception(dc, EXCP_HW_EXCP);
826 }
827 if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
828 gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
829 }
830 break;
ace2e4da
PC
831 case 0x1e0:
832 /* swapb */
833 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
834 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
835 break;
b8c6a5d9 836 case 0x1e2:
ace2e4da
PC
837 /*swaph */
838 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
839 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
840 break;
4acb54ba
EI
841 default:
842 cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
843 dc->pc, op, dc->rd, dc->ra, dc->rb);
844 break;
845 }
846}
847
848static inline void sync_jmpstate(DisasContext *dc)
849{
844bab60
EI
850 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
851 if (dc->jmp == JMP_DIRECT) {
852 tcg_gen_movi_tl(env_btaken, 1);
853 }
23979dc5
EI
854 dc->jmp = JMP_INDIRECT;
855 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
4acb54ba
EI
856 }
857}
858
859static void dec_imm(DisasContext *dc)
860{
861 LOG_DIS("imm %x\n", dc->imm << 16);
862 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
863 dc->tb_flags |= IMM_FLAG;
864 dc->clear_imm = 0;
865}
866
867static inline void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
536446e9 868 unsigned int size, bool exclusive)
4acb54ba
EI
869{
870 int mem_index = cpu_mmu_index(dc->env);
871
872 if (size == 1) {
873 tcg_gen_qemu_ld8u(dst, addr, mem_index);
874 } else if (size == 2) {
875 tcg_gen_qemu_ld16u(dst, addr, mem_index);
876 } else if (size == 4) {
877 tcg_gen_qemu_ld32u(dst, addr, mem_index);
878 } else
879 cpu_abort(dc->env, "Incorrect load size %d\n", size);
536446e9
EI
880
881 if (exclusive) {
4a536270 882 tcg_gen_mov_tl(env_res_addr, addr);
11a76217 883 tcg_gen_mov_tl(env_res_val, dst);
536446e9 884 }
4acb54ba
EI
885}
886
887static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
888{
889 unsigned int extimm = dc->tb_flags & IMM_FLAG;
5818dee5
EI
890 /* Should be set to one if r1 is used by loadstores. */
891 int stackprot = 0;
892
893 /* All load/stores use ra. */
894 if (dc->ra == 1) {
895 stackprot = 1;
896 }
4acb54ba 897
9ef55357 898 /* Treat the common cases first. */
4acb54ba 899 if (!dc->type_b) {
4b5ef0b5
EI
900 /* If any of the regs is r0, return a ptr to the other. */
901 if (dc->ra == 0) {
902 return &cpu_R[dc->rb];
903 } else if (dc->rb == 0) {
904 return &cpu_R[dc->ra];
905 }
906
5818dee5
EI
907 if (dc->rb == 1) {
908 stackprot = 1;
909 }
910
4acb54ba
EI
911 *t = tcg_temp_new();
912 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
5818dee5
EI
913
914 if (stackprot) {
64254eba 915 gen_helper_stackprot(cpu_env, *t);
5818dee5 916 }
4acb54ba
EI
917 return t;
918 }
919 /* Immediate. */
920 if (!extimm) {
921 if (dc->imm == 0) {
922 return &cpu_R[dc->ra];
923 }
924 *t = tcg_temp_new();
925 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
926 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
927 } else {
928 *t = tcg_temp_new();
929 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
930 }
931
5818dee5 932 if (stackprot) {
64254eba 933 gen_helper_stackprot(cpu_env, *t);
5818dee5 934 }
4acb54ba
EI
935 return t;
936}
937
9f8beb66
EI
938static inline void dec_byteswap(DisasContext *dc, TCGv dst, TCGv src, int size)
939{
940 if (size == 4) {
941 tcg_gen_bswap32_tl(dst, src);
942 } else if (size == 2) {
943 TCGv t = tcg_temp_new();
944
945 /* bswap16 assumes the high bits are zero. */
946 tcg_gen_andi_tl(t, src, 0xffff);
947 tcg_gen_bswap16_tl(dst, t);
948 tcg_temp_free(t);
949 } else {
950 /* Ignore.
951 cpu_abort(dc->env, "Invalid ldst byteswap size %d\n", size);
952 */
953 }
954}
955
4acb54ba
EI
956static void dec_load(DisasContext *dc)
957{
958 TCGv t, *addr;
8cc9b43f 959 unsigned int size, rev = 0, ex = 0;
4acb54ba
EI
960
961 size = 1 << (dc->opcode & 3);
9f8beb66
EI
962
963 if (!dc->type_b) {
964 rev = (dc->ir >> 9) & 1;
8cc9b43f 965 ex = (dc->ir >> 10) & 1;
9f8beb66
EI
966 }
967
0187688f 968 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
97f90cbf 969 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
0187688f
EI
970 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
971 t_gen_raise_exception(dc, EXCP_HW_EXCP);
972 return;
973 }
4acb54ba 974
8cc9b43f
PC
975 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
976 ex ? "x" : "");
9f8beb66 977
4acb54ba
EI
978 t_sync_flags(dc);
979 addr = compute_ldst_addr(dc, &t);
980
9f8beb66
EI
981 /*
982 * When doing reverse accesses we need to do two things.
983 *
4ff9786c 984 * 1. Reverse the address wrt endianness.
9f8beb66
EI
985 * 2. Byteswap the data lanes on the way back into the CPU core.
986 */
987 if (rev && size != 4) {
988 /* Endian reverse the address. t is addr. */
989 switch (size) {
990 case 1:
991 {
992 /* 00 -> 11
993 01 -> 10
994 10 -> 10
995 11 -> 00 */
996 TCGv low = tcg_temp_new();
997
998 /* Force addr into the temp. */
999 if (addr != &t) {
1000 t = tcg_temp_new();
1001 tcg_gen_mov_tl(t, *addr);
1002 addr = &t;
1003 }
1004
1005 tcg_gen_andi_tl(low, t, 3);
1006 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1007 tcg_gen_andi_tl(t, t, ~3);
1008 tcg_gen_or_tl(t, t, low);
9f8beb66
EI
1009 tcg_gen_mov_tl(env_imm, t);
1010 tcg_temp_free(low);
1011 break;
1012 }
1013
1014 case 2:
1015 /* 00 -> 10
1016 10 -> 00. */
1017 /* Force addr into the temp. */
1018 if (addr != &t) {
1019 t = tcg_temp_new();
1020 tcg_gen_xori_tl(t, *addr, 2);
1021 addr = &t;
1022 } else {
1023 tcg_gen_xori_tl(t, t, 2);
1024 }
1025 break;
1026 default:
1027 cpu_abort(dc->env, "Invalid reverse size\n");
1028 break;
1029 }
1030 }
1031
8cc9b43f
PC
1032 /* lwx does not throw unaligned access errors, so force alignment */
1033 if (ex) {
1034 /* Force addr into the temp. */
1035 if (addr != &t) {
1036 t = tcg_temp_new();
1037 tcg_gen_mov_tl(t, *addr);
1038 addr = &t;
1039 }
1040 tcg_gen_andi_tl(t, t, ~3);
1041 }
1042
4acb54ba
EI
1043 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1044 sync_jmpstate(dc);
968a40f6
EI
1045
1046 /* Verify alignment if needed. */
1047 if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
a12f6507
EI
1048 TCGv v = tcg_temp_new();
1049
1050 /*
1051 * Microblaze gives MMU faults priority over faults due to
1052 * unaligned addresses. That's why we speculatively do the load
1053 * into v. If the load succeeds, we verify alignment of the
1054 * address and if that succeeds we write into the destination reg.
1055 */
536446e9 1056 gen_load(dc, v, *addr, size, ex);
a12f6507
EI
1057
1058 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
64254eba 1059 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
3aa80988 1060 tcg_const_tl(0), tcg_const_tl(size - 1));
9f8beb66
EI
1061 if (dc->rd) {
1062 if (rev) {
1063 dec_byteswap(dc, cpu_R[dc->rd], v, size);
1064 } else {
1065 tcg_gen_mov_tl(cpu_R[dc->rd], v);
1066 }
1067 }
a12f6507 1068 tcg_temp_free(v);
968a40f6 1069 } else {
a12f6507 1070 if (dc->rd) {
536446e9 1071 gen_load(dc, cpu_R[dc->rd], *addr, size, ex);
9f8beb66
EI
1072 if (rev) {
1073 dec_byteswap(dc, cpu_R[dc->rd], cpu_R[dc->rd], size);
1074 }
a12f6507 1075 } else {
9f8beb66 1076 /* We are loading into r0, no need to reverse. */
536446e9 1077 gen_load(dc, env_imm, *addr, size, ex);
a12f6507 1078 }
4acb54ba
EI
1079 }
1080
8cc9b43f
PC
1081 if (ex) { /* lwx */
1082 /* no support for for AXI exclusive so always clear C */
1083 write_carryi(dc, 0);
8cc9b43f
PC
1084 }
1085
4acb54ba
EI
1086 if (addr == &t)
1087 tcg_temp_free(t);
1088}
1089
1090static void gen_store(DisasContext *dc, TCGv addr, TCGv val,
1091 unsigned int size)
1092{
1093 int mem_index = cpu_mmu_index(dc->env);
1094
1095 if (size == 1)
1096 tcg_gen_qemu_st8(val, addr, mem_index);
1097 else if (size == 2) {
1098 tcg_gen_qemu_st16(val, addr, mem_index);
1099 } else if (size == 4) {
1100 tcg_gen_qemu_st32(val, addr, mem_index);
1101 } else
1102 cpu_abort(dc->env, "Incorrect store size %d\n", size);
1103}
1104
/* Decode and translate a store instruction (s{b,h,w}[i][r][x]).
 * Handles the plain stores, the byte/halfword reversed variants (sbr/shr)
 * and the conditional store swx used for load-linked/store-conditional
 * sequences together with lwx.
 */
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    int swx_skip = 0;
    unsigned int size, rev = 0, ex = 0;

    /* Access width is encoded in the low opcode bits. */
    size = 1 << (dc->opcode & 3);
    if (!dc->type_b) {
        /* rev/ex only exist in the register (non-immediate) form. */
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }

    /* Widths above a word are illegal when the core raises
       illegal-opcode exceptions. */
    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* addr may point at a register, at env_imm, or at the local t. */
    addr = compute_ldst_addr(dc, &t);

    /* local temp: it must survive the brcond below for the swx case. */
    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        /* Assume failure (carry set); cleared below on success. */
        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        /* Reservation must be on this exact address. */
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        gen_load(dc, tval, swx_addr, 4, false);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* Byte access: invert the low two address bits:
                   00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                /* NOTE(review): the table above is the original comment;
                   the code computes addr = (addr & ~3) | (3 - (addr & 3)),
                   i.e. 01 -> 10 and 10 -> 01. */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* Halfword access: flip bit 1.
                   00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(dc->env, "Invalid reverse size\n");
                break;
        }

        /* Halfwords are additionally byteswapped; bytes need only the
           address munging done above. */
        if (size != 1) {
            TCGv bs_data = tcg_temp_new();
            dec_byteswap(dc, bs_data, cpu_R[dc->rd], size);
            gen_store(dc, *addr, bs_data, size);
            tcg_temp_free(bs_data);
        } else {
            gen_store(dc, *addr, cpu_R[dc->rd], size);
        }
    } else {
        if (rev) {
            /* Word-sized reverse: byteswap the data, address unchanged. */
            TCGv bs_data = tcg_temp_new();
            dec_byteswap(dc, bs_data, cpu_R[dc->rd], size);
            gen_store(dc, *addr, bs_data, size);
            tcg_temp_free(bs_data);
        } else {
            gen_store(dc, *addr, cpu_R[dc->rd], size);
        }
    }

    /* Verify alignment if needed.  */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, thay way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    /* compute_ldst_addr may have allocated t; release it if so. */
    if (addr == &t)
        tcg_temp_free(t);
}
1241
1242static inline void eval_cc(DisasContext *dc, unsigned int cc,
1243 TCGv d, TCGv a, TCGv b)
1244{
4acb54ba
EI
1245 switch (cc) {
1246 case CC_EQ:
b2565c69 1247 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
4acb54ba
EI
1248 break;
1249 case CC_NE:
b2565c69 1250 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
4acb54ba
EI
1251 break;
1252 case CC_LT:
b2565c69 1253 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
4acb54ba
EI
1254 break;
1255 case CC_LE:
b2565c69 1256 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
4acb54ba
EI
1257 break;
1258 case CC_GE:
b2565c69 1259 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
4acb54ba
EI
1260 break;
1261 case CC_GT:
b2565c69 1262 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
4acb54ba
EI
1263 break;
1264 default:
1265 cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
1266 break;
1267 }
1268}
1269
1270static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1271{
1272 int l1;
1273
1274 l1 = gen_new_label();
1275 /* Conditional jmp. */
1276 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1277 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1278 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1279 gen_set_label(l1);
1280}
1281
/* Decode a conditional branch (beq/bne/blt/ble/bge/bgt, optionally
 * with delay slot).  Records the branch target and latches the taken
 * condition into env_btaken for the end of the delay sequence.
 */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        /* Delay-slot form: branch resolves after the next insn. */
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Remember whether an imm insn preceded us so a fault in the
           delay slot can re-execute correctly. */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        /* Target is known at translation time: allow direct chaining. */
        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        /* Target depends on a runtime register value. */
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    /* Branch is taken when ra <cc> 0. */
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
1312
/* Decode unconditional branches (br/bra/brl/brld/... ) plus the mbar
 * and sleep encodings that share this opcode space.  Also recognizes
 * the brki-based break/debug idioms.
 */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(dc->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            /* Halt the CPU; cs->halted lives just before env in the
               MicroBlazeCPU object, hence the negative offset. */
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Track whether an imm insn preceded for fault restart. */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            /* brki rd, 8/0x18 (without a preceding imm) is the
               software break idiom. */
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                /* brki rd, 0: debugger entry; privileged in user mode. */
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            /* PC-relative target known now: chain directly. */
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}
1391
1392static inline void do_rti(DisasContext *dc)
1393{
1394 TCGv t0, t1;
1395 t0 = tcg_temp_new();
1396 t1 = tcg_temp_new();
1397 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1398 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1399 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1400
1401 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1402 tcg_gen_or_tl(t1, t1, t0);
1403 msr_write(dc, t1);
1404 tcg_temp_free(t1);
1405 tcg_temp_free(t0);
1406 dc->tb_flags &= ~DRTI_FLAG;
1407}
1408
1409static inline void do_rtb(DisasContext *dc)
1410{
1411 TCGv t0, t1;
1412 t0 = tcg_temp_new();
1413 t1 = tcg_temp_new();
1414 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1415 tcg_gen_shri_tl(t0, t1, 1);
1416 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1417
1418 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1419 tcg_gen_or_tl(t1, t1, t0);
1420 msr_write(dc, t1);
1421 tcg_temp_free(t1);
1422 tcg_temp_free(t0);
1423 dc->tb_flags &= ~DRTB_FLAG;
1424}
1425
1426static inline void do_rte(DisasContext *dc)
1427{
1428 TCGv t0, t1;
1429 t0 = tcg_temp_new();
1430 t1 = tcg_temp_new();
1431
1432 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1433 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1434 tcg_gen_shri_tl(t0, t1, 1);
1435 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1436
1437 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1438 tcg_gen_or_tl(t1, t1, t0);
1439 msr_write(dc, t1);
1440 tcg_temp_free(t1);
1441 tcg_temp_free(t0);
1442 dc->tb_flags &= ~DRTE_FLAG;
1443}
1444
/* Decode the return instructions rts/rtid/rtbd/rted.  All have a
 * mandatory delay slot; the i/b/e variants additionally restore MSR
 * state when the delay slot completes (see do_rti/do_rtb/do_rte) and
 * are privileged.
 */
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(dc->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    /* Returns always execute one delay-slot insn. */
    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        /* Privileged: trap in user mode. */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        /* Privileged: trap in user mode. */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        /* Privileged: trap in user mode. */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    /* Return target = ra + operand b; always taken. */
    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
1490
97694c57
EI
1491static int dec_check_fpuv2(DisasContext *dc)
1492{
1493 int r;
1494
1495 r = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK;
1496
1497 if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
1498 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1499 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1500 }
1501 return r;
1502}
1503
1567a005
EI
/* Decode FPU instructions: fadd, frsub, fmul, fdiv, the fcmp.*
 * family, and the FPU v2-only flt, fint and fsqrt.  Raises an
 * illegal-opcode exception when the core has no FPU configured.
 */
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    /* No FPU configured: illegal opcode when the core raises them. */
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    /* Sub-opcode selecting the FPU operation. */
    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            /* fcmp variant selected by a secondary field. */
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            /* flt requires FPU v2. */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            /* fint requires FPU v2. */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            /* fsqrt requires FPU v2. */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}
1608
4acb54ba
EI
1609static void dec_null(DisasContext *dc)
1610{
02b33596
EI
1611 if ((dc->tb_flags & MSR_EE_FLAG)
1612 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1613 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1614 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1615 return;
1616 }
4acb54ba
EI
1617 qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1618 dc->abort_at_next_insn = 1;
1619}
1620
6d76d23e
EI
1621/* Insns connected to FSL or AXI stream attached devices. */
1622static void dec_stream(DisasContext *dc)
1623{
1624 int mem_index = cpu_mmu_index(dc->env);
1625 TCGv_i32 t_id, t_ctrl;
1626 int ctrl;
1627
1628 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1629 dc->type_b ? "" : "d", dc->imm);
1630
1631 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1632 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1633 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1634 return;
1635 }
1636
1637 t_id = tcg_temp_new();
1638 if (dc->type_b) {
1639 tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1640 ctrl = dc->imm >> 10;
1641 } else {
1642 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1643 ctrl = dc->imm >> 5;
1644 }
1645
1646 t_ctrl = tcg_const_tl(ctrl);
1647
1648 if (dc->rd == 0) {
1649 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1650 } else {
1651 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1652 }
1653 tcg_temp_free(t_id);
1654 tcg_temp_free(t_ctrl);
1655}
1656
4acb54ba
EI
/* Instruction decoder dispatch table.  Each entry pairs an opcode
 * pattern/mask with its decoder routine; decode() calls the first
 * matching entry.  The final {{0, 0}, dec_null} entry matches every
 * opcode, so unrecognized insns fall through to dec_null.
 */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1684
64254eba 1685static inline void decode(DisasContext *dc, uint32_t ir)
4acb54ba 1686{
4acb54ba
EI
1687 int i;
1688
fdefe51c 1689 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4acb54ba 1690 tcg_gen_debug_insn_start(dc->pc);
fdefe51c 1691 }
4acb54ba 1692
64254eba 1693 dc->ir = ir;
4acb54ba
EI
1694 LOG_DIS("%8.8x\t", dc->ir);
1695
1696 if (dc->ir)
1697 dc->nr_nops = 0;
1698 else {
1567a005 1699 if ((dc->tb_flags & MSR_EE_FLAG)
97f90cbf
EI
1700 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1701 && (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1567a005
EI
1702 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1703 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1704 return;
1705 }
1706
4acb54ba
EI
1707 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1708 dc->nr_nops++;
1709 if (dc->nr_nops > 4)
1710 cpu_abort(dc->env, "fetching nop sequence\n");
1711 }
1712 /* bit 2 seems to indicate insn type. */
1713 dc->type_b = ir & (1 << 29);
1714
1715 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1716 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1717 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1718 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1719 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1720
1721 /* Large switch for all insns. */
1722 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1723 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1724 decinfo[i].dec(dc);
1725 break;
1726 }
1727 }
1728}
1729
68cee38a 1730static void check_breakpoint(CPUMBState *env, DisasContext *dc)
4acb54ba
EI
1731{
1732 CPUBreakpoint *bp;
1733
72cf2d4f
BS
1734 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1735 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4acb54ba
EI
1736 if (bp->pc == dc->pc) {
1737 t_gen_raise_exception(dc, EXCP_DEBUG);
1738 dc->is_jmp = DISAS_UPDATE;
1739 }
1740 }
1741 }
1742}
1743
/* generate intermediate code for basic block 'tb'.  When search_pc is
 * true, additionally record per-opcode PC/icount bookkeeping so guest
 * state can be reconstructed after a fault mid-TB.
 */
static inline void
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUMBState *env = &cpu->env;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->env = env;
    dc->tb = tb;
    /* Remember the entry flags so we can detect per-TB state changes. */
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* The TB may start inside a delay slot (D_FLAG carried in flags). */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3)
        cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    do
    {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            /* Record PC/icount for each generated opcode index; pad
               gaps left by multi-op insns with zeros. */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        /* An imm insn sets clear_imm = 0 to keep IMM_FLAG alive for
           the next insn. */
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* Delay slot completed: apply pending MSR updates. */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    int l1;

                    t_sync_flags(dc);
                    l1 = gen_new_label();
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
         && tcg_ctx.gen_opc_ptr < gen_opc_end
         && !singlestep
         && (dc->pc < next_page_start)
         && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* TB ended inside a delay slot; force a state sync. */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-pad the bookkeeping arrays up to the last opcode. */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
                 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
                 tcg_ctx.gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1949
68cee38a 1950void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
4acb54ba 1951{
4a274212 1952 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
4acb54ba
EI
1953}
1954
68cee38a 1955void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
4acb54ba 1956{
4a274212 1957 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
4acb54ba
EI
1958}
1959
878096ee
AF
1960void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1961 int flags)
4acb54ba 1962{
878096ee
AF
1963 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1964 CPUMBState *env = &cpu->env;
4acb54ba
EI
1965 int i;
1966
1967 if (!env || !f)
1968 return;
1969
1970 cpu_fprintf(f, "IN: PC=%x %s\n",
1971 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
97694c57 1972 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
4c24aa0a 1973 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
97694c57 1974 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
17c52a43 1975 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
4acb54ba
EI
1976 env->btaken, env->btarget,
1977 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
17c52a43
EI
1978 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1979 (env->sregs[SR_MSR] & MSR_EIP),
1980 (env->sregs[SR_MSR] & MSR_IE));
1981
4acb54ba
EI
1982 for (i = 0; i < 32; i++) {
1983 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1984 if ((i + 1) % 4 == 0)
1985 cpu_fprintf(f, "\n");
1986 }
1987 cpu_fprintf(f, "\n\n");
1988}
1989
b33ab1f7 1990MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
4acb54ba 1991{
b77f98ca 1992 MicroBlazeCPU *cpu;
4acb54ba 1993
b77f98ca 1994 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
4acb54ba 1995
746b03b2 1996 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
4acb54ba 1997
cd0c24f9
AF
1998 return cpu;
1999}
4acb54ba 2000
cd0c24f9
AF
/* Register the TCG globals backing the translator's view of CPUMBState:
 * the env pointer, the helper globals (debug, iflags, imm, branch state,
 * reservation state) and the general purpose / special register files.
 * Called once at startup before any translation.
 */
void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    /* lwx/swx reservation address and value. */
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    /* General purpose registers r0..r31. */
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    /* Special registers (PC, MSR, ESR, ...). */
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
2039
68cee38a 2040void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
4acb54ba 2041{
25983cad 2042 env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
4acb54ba 2043}