1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
 21#include "cpu.h"
 22#include "disas/disas.h"
23#include "tcg-op.h"
24#include "helper.h"
25#include "microblaze-decode.h"
26
27#define GEN_HELPER 1
28#include "helper.h"
29
30#define SIM_COMPAT 0
31#define DISAS_GNU 1
32#define DISAS_MB 1
33#if DISAS_MB && !SIM_COMPAT
34# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
35#else
36# define LOG_DIS(...) do { } while (0)
37#endif
38
39#define D(x)
40
41#define EXTRACT_FIELD(src, start, end) \
42 (((src) >> start) & ((1 << (end - start + 1)) - 1))
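/* Example: EXTRACT_FIELD(ir, 26, 31) extracts the 6-bit major opcode held in
   bits 31..26 of an instruction word, which is how decode() below uses it. */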
43
44static TCGv env_debug;
45static TCGv_ptr cpu_env;
46static TCGv cpu_R[32];
47static TCGv cpu_SR[18];
48static TCGv env_imm;
49static TCGv env_btaken;
50static TCGv env_btarget;
51static TCGv env_iflags;
 52static TCGv env_res_addr;
 53static TCGv env_res_val;
 54
 55#include "exec/gen-icount.h"
56
57/* This is the state at translation time. */
58typedef struct DisasContext {
 59 CPUMBState *env;
 60 target_ulong pc;
61
62 /* Decoder. */
63 int type_b;
64 uint32_t ir;
65 uint8_t opcode;
66 uint8_t rd, ra, rb;
67 uint16_t imm;
68
69 unsigned int cpustate_changed;
70 unsigned int delayed_branch;
71 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
72 unsigned int clear_imm;
73 int is_jmp;
74
75#define JMP_NOJMP 0
76#define JMP_DIRECT 1
77#define JMP_DIRECT_CC 2
78#define JMP_INDIRECT 3
79 unsigned int jmp;
80 uint32_t jmp_pc;
81
82 int abort_at_next_insn;
83 int nr_nops;
84 struct TranslationBlock *tb;
85 int singlestep_enabled;
86} DisasContext;
87
 88static const char *regnames[] =
89{
90 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
91 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
92 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
93 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
94};
95
 96static const char *special_regnames[] =
97{
98 "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
99 "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
100 "sr16", "sr17", "sr18"
101};
102
103/* Sign extend at translation time. */
104static inline int sign_extend(unsigned int val, unsigned int width)
105{
106 int sval;
107
108 /* LSL. */
109 val <<= 31 - width;
110 sval = val;
111 /* ASR. */
112 sval >>= 31 - width;
113 return sval;
114}
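/* Worked example: sign_extend(0x8000, 15) shifts the value up so bit 15 lands
   in bit 31, then arithmetic-shifts back down, yielding 0xffff8000. */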
115
116static inline void t_sync_flags(DisasContext *dc)
117{
 118 /* Synch the tb dependent flags between translator and runtime. */
119 if (dc->tb_flags != dc->synced_flags) {
120 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
121 dc->synced_flags = dc->tb_flags;
122 }
123}
124
125static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
126{
127 TCGv_i32 tmp = tcg_const_i32(index);
128
129 t_sync_flags(dc);
130 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
 131 gen_helper_raise_exception(cpu_env, tmp);
132 tcg_temp_free_i32(tmp);
133 dc->is_jmp = DISAS_UPDATE;
134}
135
136static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
137{
138 TranslationBlock *tb;
139 tb = dc->tb;
140 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
141 tcg_gen_goto_tb(n);
142 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
 143 tcg_gen_exit_tb((uintptr_t)tb + n);
144 } else {
145 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
146 tcg_gen_exit_tb(0);
147 }
148}
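/* gen_goto_tb only uses direct TB chaining (goto_tb/exit_tb with a non-zero
   cookie) when the destination lies in the same guest page as this TB's start
   address; otherwise it just updates SR_PC and exits to the main loop. */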
149
150static void read_carry(DisasContext *dc, TCGv d)
151{
152 tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
153}
154
155/*
156 * write_carry sets the carry bits in MSR based on bit 0 of v.
157 * v[31:1] are ignored.
158 */
159static void write_carry(DisasContext *dc, TCGv v)
160{
161 TCGv t0 = tcg_temp_new();
162 tcg_gen_shli_tl(t0, v, 31);
163 tcg_gen_sari_tl(t0, t0, 31);
164 tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
165 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
166 ~(MSR_C | MSR_CC));
167 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
168 tcg_temp_free(t0);
169}
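/* The carry is kept in two MSR bits that write_carry keeps in sync: MSR_C and
   the carry copy MSR_CC, which appears to sit in the MSR's top bit (read_carry
   above fetches it with a plain shift right by 31). */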
170
 171static void write_carryi(DisasContext *dc, bool carry)
172{
173 TCGv t0 = tcg_temp_new();
 174 tcg_gen_movi_tl(t0, carry);
175 write_carry(dc, t0);
176 tcg_temp_free(t0);
177}
178
179/* True if ALU operand b is a small immediate that may deserve
180 faster treatment. */
181static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
182{
183 /* Immediate insn without the imm prefix ? */
184 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
185}
186
187static inline TCGv *dec_alu_op_b(DisasContext *dc)
188{
189 if (dc->type_b) {
190 if (dc->tb_flags & IMM_FLAG)
191 tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
192 else
193 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
194 return &env_imm;
195 } else
196 return &cpu_R[dc->rb];
197}
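/* dec_alu_op_b picks operand B: type-B insns use the 16-bit immediate,
   sign-extended, unless an imm prefix is pending (IMM_FLAG), in which case the
   low 16 bits are OR'ed into env_imm on top of the high half stored by
   dec_imm(). Type-A insns simply use register rb. */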
198
199static void dec_add(DisasContext *dc)
200{
201 unsigned int k, c;
 202 TCGv cf;
203
204 k = dc->opcode & 4;
205 c = dc->opcode & 2;
206
207 LOG_DIS("add%s%s%s r%d r%d r%d\n",
208 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
209 dc->rd, dc->ra, dc->rb);
210
211 /* Take care of the easy cases first. */
212 if (k) {
213 /* k - keep carry, no need to update MSR. */
214 /* If rd == r0, it's a nop. */
215 if (dc->rd) {
216 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
217
218 if (c) {
219 /* c - Add carry into the result. */
220 cf = tcg_temp_new();
221
222 read_carry(dc, cf);
223 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
224 tcg_temp_free(cf);
225 }
226 }
227 return;
228 }
229
230 /* From now on, we can assume k is zero. So we need to update MSR. */
231 /* Extract carry. */
232 cf = tcg_temp_new();
233 if (c) {
234 read_carry(dc, cf);
235 } else {
236 tcg_gen_movi_tl(cf, 0);
237 }
238
239 if (dc->rd) {
240 TCGv ncf = tcg_temp_new();
 241 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
 242 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
243 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
244 write_carry(dc, ncf);
245 tcg_temp_free(ncf);
246 } else {
 247 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
 248 write_carry(dc, cf);
 249 }
 250 tcg_temp_free(cf);
251}
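/* In dec_add's MSR-updating path above, the carry out of ra + b + cf is
   computed by gen_helper_carry into a separate temporary before rd is written,
   which presumably keeps the sequence correct when rd aliases ra or rb; MSR's
   carry is only updated from that temporary afterwards. */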
252
253static void dec_sub(DisasContext *dc)
254{
255 unsigned int u, cmp, k, c;
 256 TCGv cf, na;
257
258 u = dc->imm & 2;
259 k = dc->opcode & 4;
260 c = dc->opcode & 2;
261 cmp = (dc->imm & 1) && (!dc->type_b) && k;
262
263 if (cmp) {
264 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
265 if (dc->rd) {
266 if (u)
267 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
268 else
269 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
270 }
271 return;
272 }
273
274 LOG_DIS("sub%s%s r%d, r%d r%d\n",
275 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
276
277 /* Take care of the easy cases first. */
278 if (k) {
279 /* k - keep carry, no need to update MSR. */
280 /* If rd == r0, it's a nop. */
281 if (dc->rd) {
 282 tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
283
284 if (c) {
285 /* c - Add carry into the result. */
286 cf = tcg_temp_new();
287
288 read_carry(dc, cf);
289 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
290 tcg_temp_free(cf);
291 }
292 }
293 return;
294 }
295
296 /* From now on, we can assume k is zero. So we need to update MSR. */
297 /* Extract carry. And complement a into na. */
298 cf = tcg_temp_new();
299 na = tcg_temp_new();
300 if (c) {
301 read_carry(dc, cf);
302 } else {
303 tcg_gen_movi_tl(cf, 1);
304 }
305
306 /* d = b + ~a + c. carry defaults to 1. */
307 tcg_gen_not_tl(na, cpu_R[dc->ra]);
308
309 if (dc->rd) {
310 TCGv ncf = tcg_temp_new();
 311 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
312 tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
313 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
314 write_carry(dc, ncf);
315 tcg_temp_free(ncf);
316 } else {
 317 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
 318 write_carry(dc, cf);
 319 }
320 tcg_temp_free(cf);
321 tcg_temp_free(na);
322}
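/* dec_sub relies on the two's-complement identity b - a = b + ~a + 1: with no
   carry-in the constant 1 is used, and the "c" variants substitute the current
   MSR carry, matching the "d = b + ~a + c" comment above. */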
323
324static void dec_pattern(DisasContext *dc)
325{
326 unsigned int mode;
327 int l1;
328
 329 if ((dc->tb_flags & MSR_EE_FLAG)
 330 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
331 && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
332 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
333 t_gen_raise_exception(dc, EXCP_HW_EXCP);
334 }
335
336 mode = dc->opcode & 3;
337 switch (mode) {
338 case 0:
339 /* pcmpbf. */
340 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
341 if (dc->rd)
342 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
343 break;
344 case 2:
345 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
346 if (dc->rd) {
347 TCGv t0 = tcg_temp_local_new();
348 l1 = gen_new_label();
349 tcg_gen_movi_tl(t0, 1);
350 tcg_gen_brcond_tl(TCG_COND_EQ,
351 cpu_R[dc->ra], cpu_R[dc->rb], l1);
352 tcg_gen_movi_tl(t0, 0);
353 gen_set_label(l1);
354 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
355 tcg_temp_free(t0);
356 }
357 break;
358 case 3:
359 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
360 l1 = gen_new_label();
361 if (dc->rd) {
362 TCGv t0 = tcg_temp_local_new();
363 tcg_gen_movi_tl(t0, 1);
364 tcg_gen_brcond_tl(TCG_COND_NE,
365 cpu_R[dc->ra], cpu_R[dc->rb], l1);
366 tcg_gen_movi_tl(t0, 0);
367 gen_set_label(l1);
368 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
369 tcg_temp_free(t0);
370 }
371 break;
372 default:
373 cpu_abort(dc->env,
374 "unsupported pattern insn opcode=%x\n", dc->opcode);
375 break;
376 }
377}
378
379static void dec_and(DisasContext *dc)
380{
381 unsigned int not;
382
383 if (!dc->type_b && (dc->imm & (1 << 10))) {
384 dec_pattern(dc);
385 return;
386 }
387
388 not = dc->opcode & (1 << 1);
389 LOG_DIS("and%s\n", not ? "n" : "");
390
391 if (!dc->rd)
392 return;
393
394 if (not) {
 395 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
396 } else
397 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
398}
399
400static void dec_or(DisasContext *dc)
401{
402 if (!dc->type_b && (dc->imm & (1 << 10))) {
403 dec_pattern(dc);
404 return;
405 }
406
407 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
408 if (dc->rd)
409 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
410}
411
412static void dec_xor(DisasContext *dc)
413{
414 if (!dc->type_b && (dc->imm & (1 << 10))) {
415 dec_pattern(dc);
416 return;
417 }
418
419 LOG_DIS("xor r%d\n", dc->rd);
420 if (dc->rd)
421 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
422}
423
424static inline void msr_read(DisasContext *dc, TCGv d)
425{
426 tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
427}
428
429static inline void msr_write(DisasContext *dc, TCGv v)
430{
431 TCGv t;
432
433 t = tcg_temp_new();
 434 dc->cpustate_changed = 1;
 435 /* PVR bit is not writable. */
436 tcg_gen_andi_tl(t, v, ~MSR_PVR);
437 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
 438 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t); /* use the PVR-masked value */
439 tcg_temp_free(t);
440}
441
442static void dec_msr(DisasContext *dc)
443{
444 TCGv t0, t1;
445 unsigned int sr, to, rn;
 446 int mem_index = cpu_mmu_index(dc->env);
447
448 sr = dc->imm & ((1 << 14) - 1);
449 to = dc->imm & (1 << 14);
450 dc->type_b = 1;
451 if (to)
452 dc->cpustate_changed = 1;
453
454 /* msrclr and msrset. */
455 if (!(dc->imm & (1 << 15))) {
456 unsigned int clr = dc->ir & (1 << 16);
457
458 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
459 dc->rd, dc->imm);
460
461 if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
462 /* nop??? */
463 return;
464 }
465
466 if ((dc->tb_flags & MSR_EE_FLAG)
467 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
468 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
469 t_gen_raise_exception(dc, EXCP_HW_EXCP);
470 return;
471 }
472
473 if (dc->rd)
474 msr_read(dc, cpu_R[dc->rd]);
475
476 t0 = tcg_temp_new();
477 t1 = tcg_temp_new();
478 msr_read(dc, t0);
479 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
480
481 if (clr) {
482 tcg_gen_not_tl(t1, t1);
483 tcg_gen_and_tl(t0, t0, t1);
484 } else
485 tcg_gen_or_tl(t0, t0, t1);
486 msr_write(dc, t0);
487 tcg_temp_free(t0);
488 tcg_temp_free(t1);
489 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
490 dc->is_jmp = DISAS_UPDATE;
491 return;
492 }
493
494 if (to) {
495 if ((dc->tb_flags & MSR_EE_FLAG)
496 && mem_index == MMU_USER_IDX) {
497 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
498 t_gen_raise_exception(dc, EXCP_HW_EXCP);
499 return;
500 }
501 }
502
503#if !defined(CONFIG_USER_ONLY)
504 /* Catch read/writes to the mmu block. */
505 if ((sr & ~0xff) == 0x1000) {
506 sr &= 7;
507 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
508 if (to)
 509 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
 510 else
 511 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
512 return;
513 }
514#endif
515
516 if (to) {
517 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
518 switch (sr) {
519 case 0:
520 break;
521 case 1:
522 msr_write(dc, cpu_R[dc->ra]);
523 break;
524 case 0x3:
525 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
526 break;
527 case 0x5:
528 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
529 break;
530 case 0x7:
 531 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
 532 break;
 533 case 0x800:
 534 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
535 break;
536 case 0x802:
 537 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
 538 break;
539 default:
540 cpu_abort(dc->env, "unknown mts reg %x\n", sr);
541 break;
542 }
543 } else {
544 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
545
546 switch (sr) {
547 case 0:
548 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
549 break;
550 case 1:
551 msr_read(dc, cpu_R[dc->rd]);
552 break;
553 case 0x3:
554 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
555 break;
556 case 0x5:
557 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
558 break;
559 case 0x7:
 560 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
561 break;
562 case 0xb:
563 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
564 break;
 565 case 0x800:
 566 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
567 break;
568 case 0x802:
 569 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
 570 break;
571 case 0x2000:
572 case 0x2001:
573 case 0x2002:
574 case 0x2003:
575 case 0x2004:
576 case 0x2005:
577 case 0x2006:
578 case 0x2007:
579 case 0x2008:
580 case 0x2009:
581 case 0x200a:
582 case 0x200b:
583 case 0x200c:
584 rn = sr & 0xf;
585 tcg_gen_ld_tl(cpu_R[dc->rd],
 586 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
587 break;
588 default:
589 cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
590 break;
591 }
592 }
593
594 if (dc->rd == 0) {
595 tcg_gen_movi_tl(cpu_R[0], 0);
596 }
597}
598
599/* 64-bit signed mul, lower result in d and upper in d2. */
600static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
601{
602 TCGv_i64 t0, t1;
603
604 t0 = tcg_temp_new_i64();
605 t1 = tcg_temp_new_i64();
606
607 tcg_gen_ext_i32_i64(t0, a);
608 tcg_gen_ext_i32_i64(t1, b);
609 tcg_gen_mul_i64(t0, t0, t1);
610
611 tcg_gen_trunc_i64_i32(d, t0);
612 tcg_gen_shri_i64(t0, t0, 32);
613 tcg_gen_trunc_i64_i32(d2, t0);
614
615 tcg_temp_free_i64(t0);
616 tcg_temp_free_i64(t1);
617}
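/* t_gen_muls and t_gen_mulu follow the same widening pattern: extend both
   32-bit operands to 64 bits (signed here, unsigned below), multiply once,
   and split the product into the low word (d) and the high word (d2). */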
618
619/* 64-bit unsigned muls, lower result in d and upper in d2. */
620static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
621{
622 TCGv_i64 t0, t1;
623
624 t0 = tcg_temp_new_i64();
625 t1 = tcg_temp_new_i64();
626
627 tcg_gen_extu_i32_i64(t0, a);
628 tcg_gen_extu_i32_i64(t1, b);
629 tcg_gen_mul_i64(t0, t0, t1);
630
631 tcg_gen_trunc_i64_i32(d, t0);
632 tcg_gen_shri_i64(t0, t0, 32);
633 tcg_gen_trunc_i64_i32(d2, t0);
634
635 tcg_temp_free_i64(t0);
636 tcg_temp_free_i64(t1);
637}
638
639/* Multiplier unit. */
640static void dec_mul(DisasContext *dc)
641{
642 TCGv d[2];
643 unsigned int subcode;
644
 645 if ((dc->tb_flags & MSR_EE_FLAG)
 646 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
647 && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
648 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
649 t_gen_raise_exception(dc, EXCP_HW_EXCP);
650 return;
651 }
652
653 subcode = dc->imm & 3;
654 d[0] = tcg_temp_new();
655 d[1] = tcg_temp_new();
656
657 if (dc->type_b) {
658 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
659 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
660 goto done;
661 }
662
663 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
664 if (subcode >= 1 && subcode <= 3
665 && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
666 /* nop??? */
667 }
668
669 switch (subcode) {
670 case 0:
671 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
672 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
673 break;
674 case 1:
675 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
676 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
677 break;
678 case 2:
679 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
680 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
681 break;
682 case 3:
683 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
684 t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
685 break;
686 default:
687 cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
688 break;
689 }
690done:
691 tcg_temp_free(d[0]);
692 tcg_temp_free(d[1]);
693}
694
695/* Div unit. */
696static void dec_div(DisasContext *dc)
697{
698 unsigned int u;
699
700 u = dc->imm & 2;
701 LOG_DIS("div\n");
702
 703 if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
704 && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
705 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
706 t_gen_raise_exception(dc, EXCP_HW_EXCP);
707 }
708
 709 if (u)
710 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
711 cpu_R[dc->ra]);
 712 else
713 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
714 cpu_R[dc->ra]);
715 if (!dc->rd)
716 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
717}
718
719static void dec_barrel(DisasContext *dc)
720{
721 TCGv t0;
722 unsigned int s, t;
723
 724 if ((dc->tb_flags & MSR_EE_FLAG)
 725 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
726 && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
727 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
728 t_gen_raise_exception(dc, EXCP_HW_EXCP);
729 return;
730 }
731
732 s = dc->imm & (1 << 10);
733 t = dc->imm & (1 << 9);
734
735 LOG_DIS("bs%s%s r%d r%d r%d\n",
736 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
737
738 t0 = tcg_temp_new();
739
740 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
741 tcg_gen_andi_tl(t0, t0, 31);
742
743 if (s)
744 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
745 else {
746 if (t)
747 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
748 else
749 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
750 }
751}
752
753static void dec_bit(DisasContext *dc)
754{
 755 TCGv t0;
 756 unsigned int op;
 757 int mem_index = cpu_mmu_index(dc->env);
 758
 759 op = dc->ir & ((1 << 9) - 1);
760 switch (op) {
761 case 0x21:
762 /* src. */
763 t0 = tcg_temp_new();
764
765 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
766 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
767 write_carry(dc, cpu_R[dc->ra]);
 768 if (dc->rd) {
 769 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
 770 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
 771 }
772 tcg_temp_free(t0);
773 break;
774
775 case 0x1:
776 case 0x41:
777 /* srl. */
778 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
779
780 /* Update carry. Note that write carry only looks at the LSB. */
781 write_carry(dc, cpu_R[dc->ra]);
782 if (dc->rd) {
783 if (op == 0x41)
784 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
785 else
786 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
787 }
788 break;
789 case 0x60:
790 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
791 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
792 break;
793 case 0x61:
794 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
795 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
796 break;
797 case 0x64:
798 case 0x66:
799 case 0x74:
800 case 0x76:
801 /* wdc. */
802 LOG_DIS("wdc r%d\n", dc->ra);
803 if ((dc->tb_flags & MSR_EE_FLAG)
804 && mem_index == MMU_USER_IDX) {
805 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
806 t_gen_raise_exception(dc, EXCP_HW_EXCP);
807 return;
808 }
809 break;
810 case 0x68:
811 /* wic. */
812 LOG_DIS("wic r%d\n", dc->ra);
813 if ((dc->tb_flags & MSR_EE_FLAG)
814 && mem_index == MMU_USER_IDX) {
815 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
816 t_gen_raise_exception(dc, EXCP_HW_EXCP);
817 return;
818 }
 819 break;
820 case 0xe0:
821 if ((dc->tb_flags & MSR_EE_FLAG)
822 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
823 && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
824 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
825 t_gen_raise_exception(dc, EXCP_HW_EXCP);
826 }
827 if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
828 gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
829 }
830 break;
831 case 0x1e0:
832 /* swapb */
833 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
834 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
835 break;
 836 case 0x1e2:
 837 /* swaph */
838 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
839 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
840 break;
841 default:
842 cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
843 dc->pc, op, dc->rd, dc->ra, dc->rb);
844 break;
845 }
846}
847
848static inline void sync_jmpstate(DisasContext *dc)
849{
850 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
851 if (dc->jmp == JMP_DIRECT) {
852 tcg_gen_movi_tl(env_btaken, 1);
853 }
854 dc->jmp = JMP_INDIRECT;
855 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
856 }
857}
858
859static void dec_imm(DisasContext *dc)
860{
861 LOG_DIS("imm %x\n", dc->imm << 16);
862 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
863 dc->tb_flags |= IMM_FLAG;
864 dc->clear_imm = 0;
865}
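/* The imm prefix supplies the upper 16 bits of a 32-bit immediate for the
   next insn. For example, "imm 0x1234" followed by an addi with immediate
   0x5678 leaves 0x12340000 in env_imm here, and dec_alu_op_b() then ORs in
   the low 16 bits to form 0x12345678. */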
866
867static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
868{
869 unsigned int extimm = dc->tb_flags & IMM_FLAG;
870 /* Should be set to one if r1 is used by loadstores. */
871 int stackprot = 0;
872
873 /* All load/stores use ra. */
874 if (dc->ra == 1) {
875 stackprot = 1;
876 }
 877
 878 /* Treat the common cases first. */
 879 if (!dc->type_b) {
880 /* If any of the regs is r0, return a ptr to the other. */
881 if (dc->ra == 0) {
882 return &cpu_R[dc->rb];
883 } else if (dc->rb == 0) {
884 return &cpu_R[dc->ra];
885 }
886
887 if (dc->rb == 1) {
888 stackprot = 1;
889 }
890
891 *t = tcg_temp_new();
892 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
893
894 if (stackprot) {
 895 gen_helper_stackprot(cpu_env, *t);
 896 }
897 return t;
898 }
899 /* Immediate. */
900 if (!extimm) {
901 if (dc->imm == 0) {
902 return &cpu_R[dc->ra];
903 }
904 *t = tcg_temp_new();
905 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
906 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
907 } else {
908 *t = tcg_temp_new();
909 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
910 }
911
 912 if (stackprot) {
 913 gen_helper_stackprot(cpu_env, *t);
 914 }
915 return t;
916}
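/* compute_ldst_addr: whenever r1 (the stack pointer) takes part in the
   address, the computed address is also handed to gen_helper_stackprot() so
   the runtime stack-protection check can run on it; the early returns of
   &cpu_R[...] avoid a temporary when one register is r0 or the immediate
   is zero. */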
917
918static void dec_load(DisasContext *dc)
919{
 920 TCGv t, v, *addr;
 921 unsigned int size, rev = 0, ex = 0;
 922 TCGMemOp mop;
 923
924 mop = dc->opcode & 3;
925 size = 1 << mop;
926 if (!dc->type_b) {
927 rev = (dc->ir >> 9) & 1;
 928 ex = (dc->ir >> 10) & 1;
 929 }
930 mop |= MO_TE;
931 if (rev) {
932 mop ^= MO_BSWAP;
933 }
 934
 935 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
 936 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
937 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
938 t_gen_raise_exception(dc, EXCP_HW_EXCP);
939 return;
940 }
 941
942 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
943 ex ? "x" : "");
 944
945 t_sync_flags(dc);
946 addr = compute_ldst_addr(dc, &t);
947
948 /*
949 * When doing reverse accesses we need to do two things.
950 *
 951 * 1. Reverse the address wrt endianness.
952 * 2. Byteswap the data lanes on the way back into the CPU core.
953 */
954 if (rev && size != 4) {
955 /* Endian reverse the address. t is addr. */
956 switch (size) {
957 case 1:
958 {
959 /* 00 -> 11
960 01 -> 10
 961 10 -> 01
962 11 -> 00 */
963 TCGv low = tcg_temp_new();
964
965 /* Force addr into the temp. */
966 if (addr != &t) {
967 t = tcg_temp_new();
968 tcg_gen_mov_tl(t, *addr);
969 addr = &t;
970 }
971
972 tcg_gen_andi_tl(low, t, 3);
973 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
974 tcg_gen_andi_tl(t, t, ~3);
975 tcg_gen_or_tl(t, t, low);
976 tcg_gen_mov_tl(env_imm, t);
977 tcg_temp_free(low);
978 break;
979 }
980
981 case 2:
982 /* 00 -> 10
983 10 -> 00. */
984 /* Force addr into the temp. */
985 if (addr != &t) {
986 t = tcg_temp_new();
987 tcg_gen_xori_tl(t, *addr, 2);
988 addr = &t;
989 } else {
990 tcg_gen_xori_tl(t, t, 2);
991 }
992 break;
993 default:
994 cpu_abort(dc->env, "Invalid reverse size\n");
995 break;
996 }
997 }
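    /* The mapping above is just an XOR of the low address bits: byte
       accesses effectively use addr ^ 3 (3 - (addr & 3) on top of the
       aligned base) and halfword accesses use addr ^ 2, reversing the
       position within the 32-bit word. */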
998
999 /* lwx does not throw unaligned access errors, so force alignment */
1000 if (ex) {
1001 /* Force addr into the temp. */
1002 if (addr != &t) {
1003 t = tcg_temp_new();
1004 tcg_gen_mov_tl(t, *addr);
1005 addr = &t;
1006 }
1007 tcg_gen_andi_tl(t, t, ~3);
1008 }
1009
1010 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1011 sync_jmpstate(dc);
1012
1013 /* Verify alignment if needed. */
1014 /*
1015 * Microblaze gives MMU faults priority over faults due to
1016 * unaligned addresses. That's why we speculatively do the load
1017 * into v. If the load succeeds, we verify alignment of the
1018 * address and if that succeeds we write into the destination reg.
1019 */
1020 v = tcg_temp_new();
1021 tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(dc->env), mop);
 1022
 1023 if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
 1024 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
 1025 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
 1026 tcg_const_tl(0), tcg_const_tl(size - 1));
1027 }
1028
1029 if (ex) {
1030 tcg_gen_mov_tl(env_res_addr, *addr);
1031 tcg_gen_mov_tl(env_res_val, v);
1032 }
1033 if (dc->rd) {
1034 tcg_gen_mov_tl(cpu_R[dc->rd], v);
1035 }
1036 tcg_temp_free(v);
1037
1038 if (ex) { /* lwx */
 1039 /* no support for AXI exclusive so always clear C */
1040 write_carryi(dc, 0);
1041 }
1042
1043 if (addr == &t)
1044 tcg_temp_free(t);
1045}
1046
1047static void dec_store(DisasContext *dc)
1048{
 1049 TCGv t, *addr, swx_addr;
1050 int swx_skip = 0;
1051 unsigned int size, rev = 0, ex = 0;
 1052 TCGMemOp mop;
 1053
1054 mop = dc->opcode & 3;
1055 size = 1 << mop;
1056 if (!dc->type_b) {
1057 rev = (dc->ir >> 9) & 1;
 1058 ex = (dc->ir >> 10) & 1;
 1059 }
1060 mop |= MO_TE;
1061 if (rev) {
1062 mop ^= MO_BSWAP;
1063 }
 1064
 1065 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
 1066 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1067 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1068 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1069 return;
1070 }
1071
1072 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1073 ex ? "x" : "");
1074 t_sync_flags(dc);
1075 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1076 sync_jmpstate(dc);
1077 addr = compute_ldst_addr(dc, &t);
 1078
 1079 swx_addr = tcg_temp_local_new();
 1080 if (ex) { /* swx */
 1081 TCGv tval;
1082
1083 /* Force addr into the swx_addr. */
1084 tcg_gen_mov_tl(swx_addr, *addr);
1085 addr = &swx_addr;
1086 /* swx does not throw unaligned access errors, so force alignment */
1087 tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
1088
1089 write_carryi(dc, 1);
1090 swx_skip = gen_new_label();
 1091 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
1092
1093 /* Compare the value loaded at lwx with current contents of
1094 the reserved location.
1095 FIXME: This only works for system emulation where we can expect
1096 this compare and the following write to be atomic. For user
1097 emulation we need to add atomicity between threads. */
1098 tval = tcg_temp_new();
 1099 tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(dc->env), MO_TEUL);
 1100 tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
 1101 write_carryi(dc, 0);
 1102 tcg_temp_free(tval);
1103 }
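    /* swx only performs the store if the reservation made by lwx still
       holds: the target must match env_res_addr and memory must still
       contain env_res_val. On failure the branch to swx_skip jumps past the
       store and MSR carry stays at 1; success clears it to 0 first. */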
1104
1105 if (rev && size != 4) {
1106 /* Endian reverse the address. t is addr. */
1107 switch (size) {
1108 case 1:
1109 {
1110 /* 00 -> 11
1111 01 -> 10
 1112 10 -> 01
1113 11 -> 00 */
1114 TCGv low = tcg_temp_new();
1115
1116 /* Force addr into the temp. */
1117 if (addr != &t) {
1118 t = tcg_temp_new();
1119 tcg_gen_mov_tl(t, *addr);
1120 addr = &t;
1121 }
1122
1123 tcg_gen_andi_tl(low, t, 3);
1124 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1125 tcg_gen_andi_tl(t, t, ~3);
1126 tcg_gen_or_tl(t, t, low);
1127 tcg_gen_mov_tl(env_imm, t);
1128 tcg_temp_free(low);
1129 break;
1130 }
1131
1132 case 2:
1133 /* 00 -> 10
1134 10 -> 00. */
1135 /* Force addr into the temp. */
1136 if (addr != &t) {
1137 t = tcg_temp_new();
1138 tcg_gen_xori_tl(t, *addr, 2);
1139 addr = &t;
1140 } else {
1141 tcg_gen_xori_tl(t, t, 2);
1142 }
1143 break;
1144 default:
1145 cpu_abort(dc->env, "Invalid reverse size\n");
1146 break;
1147 }
 1148 }
 1149 tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(dc->env), mop);
 1150
1151 /* Verify alignment if needed. */
1152 if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1153 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1154 /* FIXME: if the alignment is wrong, we should restore the value
 1155 * in memory. One possible way to achieve this is to probe
 1156 * the MMU prior to the memaccess, that way we could put
1157 * the alignment checks in between the probe and the mem
1158 * access.
 1159 */
 1160 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
 1161 tcg_const_tl(1), tcg_const_tl(size - 1));
 1162 }
 1163
1164 if (ex) {
1165 gen_set_label(swx_skip);
 1166 }
 1167 tcg_temp_free(swx_addr);
 1168
1169 if (addr == &t)
1170 tcg_temp_free(t);
1171}
1172
1173static inline void eval_cc(DisasContext *dc, unsigned int cc,
1174 TCGv d, TCGv a, TCGv b)
1175{
1176 switch (cc) {
1177 case CC_EQ:
 1178 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
1179 break;
1180 case CC_NE:
 1181 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
1182 break;
1183 case CC_LT:
 1184 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
1185 break;
1186 case CC_LE:
 1187 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
1188 break;
1189 case CC_GE:
 1190 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
1191 break;
1192 case CC_GT:
 1193 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
1194 break;
1195 default:
1196 cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
1197 break;
1198 }
1199}
1200
1201static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1202{
1203 int l1;
1204
1205 l1 = gen_new_label();
1206 /* Conditional jmp. */
1207 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1208 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1209 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1210 gen_set_label(l1);
1211}
1212
1213static void dec_bcc(DisasContext *dc)
1214{
1215 unsigned int cc;
1216 unsigned int dslot;
1217
1218 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1219 dslot = dc->ir & (1 << 25);
1220 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1221
1222 dc->delayed_branch = 1;
1223 if (dslot) {
1224 dc->delayed_branch = 2;
1225 dc->tb_flags |= D_FLAG;
1226 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
 1227 cpu_env, offsetof(CPUMBState, bimm));
1228 }
1229
1230 if (dec_alu_op_b_is_small_imm(dc)) {
1231 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1232
1233 tcg_gen_movi_tl(env_btarget, dc->pc + offset);
 1234 dc->jmp = JMP_DIRECT_CC;
 1235 dc->jmp_pc = dc->pc + offset;
 1236 } else {
 1237 dc->jmp = JMP_INDIRECT;
1238 tcg_gen_movi_tl(env_btarget, dc->pc);
1239 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1240 }
 1241 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
1242}
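/* For the delay-slot forms, dec_bcc (and dec_br below) set delayed_branch to
   2 so the main translation loop emits one more insn, the delay slot, before
   resolving the branch; D_FLAG records in the tb flags that translation is
   inside a delay slot. */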
1243
1244static void dec_br(DisasContext *dc)
1245{
 1246 unsigned int dslot, link, abs, mbar;
 1247 int mem_index = cpu_mmu_index(dc->env);
1248
1249 dslot = dc->ir & (1 << 20);
1250 abs = dc->ir & (1 << 19);
1251 link = dc->ir & (1 << 18);
1252
1253 /* Memory barrier. */
1254 mbar = (dc->ir >> 16) & 31;
1255 if (mbar == 2 && dc->imm == 4) {
1256 /* mbar IMM & 16 decodes to sleep. */
1257 if (dc->rd & 16) {
1258 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1259 TCGv_i32 tmp_1 = tcg_const_i32(1);
1260
1261 LOG_DIS("sleep\n");
1262
1263 t_sync_flags(dc);
1264 tcg_gen_st_i32(tmp_1, cpu_env,
1265 -offsetof(MicroBlazeCPU, env)
1266 +offsetof(CPUState, halted));
1267 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
1268 gen_helper_raise_exception(cpu_env, tmp_hlt);
1269 tcg_temp_free_i32(tmp_hlt);
1270 tcg_temp_free_i32(tmp_1);
1271 return;
1272 }
1273 LOG_DIS("mbar %d\n", dc->rd);
1274 /* Break the TB. */
1275 dc->cpustate_changed = 1;
1276 return;
1277 }
1278
1279 LOG_DIS("br%s%s%s%s imm=%x\n",
1280 abs ? "a" : "", link ? "l" : "",
1281 dc->type_b ? "i" : "", dslot ? "d" : "",
1282 dc->imm);
1283
1284 dc->delayed_branch = 1;
1285 if (dslot) {
1286 dc->delayed_branch = 2;
1287 dc->tb_flags |= D_FLAG;
1288 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
 1289 cpu_env, offsetof(CPUMBState, bimm));
1290 }
1291 if (link && dc->rd)
1292 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
1293
1294 dc->jmp = JMP_INDIRECT;
1295 if (abs) {
1296 tcg_gen_movi_tl(env_btaken, 1);
1297 tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
1298 if (link && !dslot) {
1299 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1300 t_gen_raise_exception(dc, EXCP_BREAK);
1301 if (dc->imm == 0) {
1302 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
1303 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1304 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1305 return;
1306 }
1307
1308 t_gen_raise_exception(dc, EXCP_DEBUG);
1309 }
1310 }
 1311 } else {
1312 if (dec_alu_op_b_is_small_imm(dc)) {
1313 dc->jmp = JMP_DIRECT;
1314 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1315 } else {
1316 tcg_gen_movi_tl(env_btaken, 1);
1317 tcg_gen_movi_tl(env_btarget, dc->pc);
1318 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1319 }
1320 }
1321}
1322
1323static inline void do_rti(DisasContext *dc)
1324{
1325 TCGv t0, t1;
1326 t0 = tcg_temp_new();
1327 t1 = tcg_temp_new();
1328 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1329 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1330 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1331
1332 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1333 tcg_gen_or_tl(t1, t1, t0);
1334 msr_write(dc, t1);
1335 tcg_temp_free(t1);
1336 tcg_temp_free(t0);
1337 dc->tb_flags &= ~DRTI_FLAG;
1338}
1339
1340static inline void do_rtb(DisasContext *dc)
1341{
1342 TCGv t0, t1;
1343 t0 = tcg_temp_new();
1344 t1 = tcg_temp_new();
1345 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1346 tcg_gen_shri_tl(t0, t1, 1);
1347 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1348
1349 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1350 tcg_gen_or_tl(t1, t1, t0);
1351 msr_write(dc, t1);
1352 tcg_temp_free(t1);
1353 tcg_temp_free(t0);
1354 dc->tb_flags &= ~DRTB_FLAG;
1355}
1356
1357static inline void do_rte(DisasContext *dc)
1358{
1359 TCGv t0, t1;
1360 t0 = tcg_temp_new();
1361 t1 = tcg_temp_new();
1362
1363 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1364 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1365 tcg_gen_shri_tl(t0, t1, 1);
1366 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1367
1368 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1369 tcg_gen_or_tl(t1, t1, t0);
1370 msr_write(dc, t1);
1371 tcg_temp_free(t1);
1372 tcg_temp_free(t0);
1373 dc->tb_flags &= ~DRTE_FLAG;
1374}
1375
1376static void dec_rts(DisasContext *dc)
1377{
1378 unsigned int b_bit, i_bit, e_bit;
 1379 int mem_index = cpu_mmu_index(dc->env);
1380
1381 i_bit = dc->ir & (1 << 21);
1382 b_bit = dc->ir & (1 << 22);
1383 e_bit = dc->ir & (1 << 23);
1384
1385 dc->delayed_branch = 2;
1386 dc->tb_flags |= D_FLAG;
1387 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
 1388 cpu_env, offsetof(CPUMBState, bimm));
1389
1390 if (i_bit) {
1391 LOG_DIS("rtid ir=%x\n", dc->ir);
1392 if ((dc->tb_flags & MSR_EE_FLAG)
1393 && mem_index == MMU_USER_IDX) {
1394 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1395 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1396 }
1397 dc->tb_flags |= DRTI_FLAG;
1398 } else if (b_bit) {
1399 LOG_DIS("rtbd ir=%x\n", dc->ir);
1400 if ((dc->tb_flags & MSR_EE_FLAG)
1401 && mem_index == MMU_USER_IDX) {
1402 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1403 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1404 }
1405 dc->tb_flags |= DRTB_FLAG;
1406 } else if (e_bit) {
1407 LOG_DIS("rted ir=%x\n", dc->ir);
1408 if ((dc->tb_flags & MSR_EE_FLAG)
1409 && mem_index == MMU_USER_IDX) {
1410 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1411 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1412 }
1413 dc->tb_flags |= DRTE_FLAG;
1414 } else
1415 LOG_DIS("rts ir=%x\n", dc->ir);
1416
 1417 dc->jmp = JMP_INDIRECT;
1418 tcg_gen_movi_tl(env_btaken, 1);
1419 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
1420}
1421
1422static int dec_check_fpuv2(DisasContext *dc)
1423{
1424 int r;
1425
1426 r = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK;
1427
1428 if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
1429 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1430 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1431 }
1432 return r;
1433}
1434
1435static void dec_fpu(DisasContext *dc)
1436{
1437 unsigned int fpu_insn;
1438
 1439 if ((dc->tb_flags & MSR_EE_FLAG)
 1440 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
 1441 && !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) {
 1442 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1443 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1444 return;
1445 }
1446
1447 fpu_insn = (dc->ir >> 7) & 7;
1448
1449 switch (fpu_insn) {
1450 case 0:
1451 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1452 cpu_R[dc->rb]);
1453 break;
1454
1455 case 1:
1456 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1457 cpu_R[dc->rb]);
1458 break;
1459
1460 case 2:
1461 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1462 cpu_R[dc->rb]);
1463 break;
1464
1465 case 3:
1466 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1467 cpu_R[dc->rb]);
1468 break;
1469
1470 case 4:
1471 switch ((dc->ir >> 4) & 7) {
1472 case 0:
 1473 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1474 cpu_R[dc->ra], cpu_R[dc->rb]);
1475 break;
1476 case 1:
 1477 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1478 cpu_R[dc->ra], cpu_R[dc->rb]);
1479 break;
1480 case 2:
 1481 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1482 cpu_R[dc->ra], cpu_R[dc->rb]);
1483 break;
1484 case 3:
 1485 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1486 cpu_R[dc->ra], cpu_R[dc->rb]);
1487 break;
1488 case 4:
 1489 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1490 cpu_R[dc->ra], cpu_R[dc->rb]);
1491 break;
1492 case 5:
 1493 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1494 cpu_R[dc->ra], cpu_R[dc->rb]);
1495 break;
1496 case 6:
 1497 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1498 cpu_R[dc->ra], cpu_R[dc->rb]);
1499 break;
1500 default:
1501 qemu_log_mask(LOG_UNIMP,
1502 "unimplemented fcmp fpu_insn=%x pc=%x"
1503 " opc=%x\n",
1504 fpu_insn, dc->pc, dc->opcode);
1505 dc->abort_at_next_insn = 1;
1506 break;
1507 }
1508 break;
1509
1510 case 5:
1511 if (!dec_check_fpuv2(dc)) {
1512 return;
1513 }
 1514 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1515 break;
1516
1517 case 6:
1518 if (!dec_check_fpuv2(dc)) {
1519 return;
1520 }
 1521 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1522 break;
1523
1524 case 7:
1525 if (!dec_check_fpuv2(dc)) {
1526 return;
1527 }
 1528 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1529 break;
1530
1531 default:
1532 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1533 " opc=%x\n",
1534 fpu_insn, dc->pc, dc->opcode);
1535 dc->abort_at_next_insn = 1;
1536 break;
1537 }
1538}
1539
1540static void dec_null(DisasContext *dc)
1541{
1542 if ((dc->tb_flags & MSR_EE_FLAG)
1543 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1544 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1545 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1546 return;
1547 }
1548 qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1549 dc->abort_at_next_insn = 1;
1550}
1551
1552/* Insns connected to FSL or AXI stream attached devices. */
1553static void dec_stream(DisasContext *dc)
1554{
1555 int mem_index = cpu_mmu_index(dc->env);
1556 TCGv_i32 t_id, t_ctrl;
1557 int ctrl;
1558
1559 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1560 dc->type_b ? "" : "d", dc->imm);
1561
1562 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1563 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1564 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1565 return;
1566 }
1567
1568 t_id = tcg_temp_new();
1569 if (dc->type_b) {
1570 tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1571 ctrl = dc->imm >> 10;
1572 } else {
1573 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1574 ctrl = dc->imm >> 5;
1575 }
1576
1577 t_ctrl = tcg_const_tl(ctrl);
1578
1579 if (dc->rd == 0) {
1580 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1581 } else {
1582 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1583 }
1584 tcg_temp_free(t_id);
1585 tcg_temp_free(t_ctrl);
1586}
1587
1588static struct decoder_info {
1589 struct {
1590 uint32_t bits;
1591 uint32_t mask;
1592 };
1593 void (*dec)(DisasContext *dc);
1594} decinfo[] = {
1595 {DEC_ADD, dec_add},
1596 {DEC_SUB, dec_sub},
1597 {DEC_AND, dec_and},
1598 {DEC_XOR, dec_xor},
1599 {DEC_OR, dec_or},
1600 {DEC_BIT, dec_bit},
1601 {DEC_BARREL, dec_barrel},
1602 {DEC_LD, dec_load},
1603 {DEC_ST, dec_store},
1604 {DEC_IMM, dec_imm},
1605 {DEC_BR, dec_br},
1606 {DEC_BCC, dec_bcc},
1607 {DEC_RTS, dec_rts},
 1608 {DEC_FPU, dec_fpu},
1609 {DEC_MUL, dec_mul},
1610 {DEC_DIV, dec_div},
1611 {DEC_MSR, dec_msr},
 1612 {DEC_STREAM, dec_stream},
1613 {{0, 0}, dec_null}
1614};
1615
 1616static inline void decode(DisasContext *dc, uint32_t ir)
 1617{
1618 int i;
1619
 1620 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
 1621 tcg_gen_debug_insn_start(dc->pc);
 1622 }
 1623
 1624 dc->ir = ir;
1625 LOG_DIS("%8.8x\t", dc->ir);
1626
1627 if (dc->ir)
1628 dc->nr_nops = 0;
1629 else {
 1630 if ((dc->tb_flags & MSR_EE_FLAG)
 1631 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
 1632 && (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1633 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1634 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1635 return;
1636 }
1637
1638 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1639 dc->nr_nops++;
1640 if (dc->nr_nops > 4)
1641 cpu_abort(dc->env, "fetching nop sequence\n");
1642 }
1643 /* bit 2 seems to indicate insn type. */
1644 dc->type_b = ir & (1 << 29);
1645
1646 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1647 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1648 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1649 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1650 dc->imm = EXTRACT_FIELD(ir, 0, 15);
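    /* Instruction word layout as extracted above: bits 31..26 opcode (with
       bit 29 selecting the register vs. immediate form, cf. type_b),
       25..21 rd, 20..16 ra, and either 15..11 rb or 15..0 imm16 depending
       on the form. */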
1651
1652 /* Large switch for all insns. */
1653 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1654 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1655 decinfo[i].dec(dc);
1656 break;
1657 }
1658 }
1659}
1660
 1661static void check_breakpoint(CPUMBState *env, DisasContext *dc)
1662{
1663 CPUBreakpoint *bp;
1664
1665 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1666 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1667 if (bp->pc == dc->pc) {
1668 t_gen_raise_exception(dc, EXCP_DEBUG);
1669 dc->is_jmp = DISAS_UPDATE;
1670 }
1671 }
1672 }
1673}
1674
1675/* generate intermediate code for basic block 'tb'. */
 1676static inline void
1677gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
1678 bool search_pc)
 1679{
 1680 CPUState *cs = CPU(cpu);
 1681 CPUMBState *env = &cpu->env;
1682 uint16_t *gen_opc_end;
1683 uint32_t pc_start;
1684 int j, lj;
1685 struct DisasContext ctx;
1686 struct DisasContext *dc = &ctx;
1687 uint32_t next_page_start, org_flags;
1688 target_ulong npc;
1689 int num_insns;
1690 int max_insns;
1691
1692 pc_start = tb->pc;
1693 dc->env = env;
1694 dc->tb = tb;
1695 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1696
 1697 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
1698
1699 dc->is_jmp = DISAS_NEXT;
1700 dc->jmp = 0;
1701 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1702 if (dc->delayed_branch) {
1703 dc->jmp = JMP_INDIRECT;
1704 }
 1705 dc->pc = pc_start;
 1706 dc->singlestep_enabled = cs->singlestep_enabled;
1707 dc->cpustate_changed = 0;
1708 dc->abort_at_next_insn = 0;
1709 dc->nr_nops = 0;
1710
1711 if (pc_start & 3)
1712 cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);
1713
1714 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1715#if !SIM_COMPAT
1716 qemu_log("--------------\n");
 1717 log_cpu_state(CPU(cpu), 0);
1718#endif
1719 }
1720
1721 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1722 lj = -1;
1723 num_insns = 0;
1724 max_insns = tb->cflags & CF_COUNT_MASK;
1725 if (max_insns == 0)
1726 max_insns = CF_COUNT_MASK;
1727
 1728 gen_tb_start();
1729 do
1730 {
1731#if SIM_COMPAT
1732 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1733 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1734 gen_helper_debug();
1735 }
1736#endif
1737 check_breakpoint(env, dc);
1738
1739 if (search_pc) {
 1740 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1741 if (lj < j) {
1742 lj++;
1743 while (lj < j)
 1744 tcg_ctx.gen_opc_instr_start[lj++] = 0;
 1745 }
 1746 tcg_ctx.gen_opc_pc[lj] = dc->pc;
 1747 tcg_ctx.gen_opc_instr_start[lj] = 1;
 1748 tcg_ctx.gen_opc_icount[lj] = num_insns;
1749 }
1750
1751 /* Pretty disas. */
1752 LOG_DIS("%8.8x:\t", dc->pc);
1753
1754 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1755 gen_io_start();
1756
1757 dc->clear_imm = 1;
 1758 decode(dc, cpu_ldl_code(env, dc->pc));
1759 if (dc->clear_imm)
1760 dc->tb_flags &= ~IMM_FLAG;
1761 dc->pc += 4;
1762 num_insns++;
1763
1764 if (dc->delayed_branch) {
1765 dc->delayed_branch--;
1766 if (!dc->delayed_branch) {
1767 if (dc->tb_flags & DRTI_FLAG)
1768 do_rti(dc);
1769 if (dc->tb_flags & DRTB_FLAG)
1770 do_rtb(dc);
1771 if (dc->tb_flags & DRTE_FLAG)
1772 do_rte(dc);
1773 /* Clear the delay slot flag. */
1774 dc->tb_flags &= ~D_FLAG;
1775 /* If it is a direct jump, try direct chaining. */
 1776 if (dc->jmp == JMP_INDIRECT) {
1777 eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1778 dc->is_jmp = DISAS_JUMP;
 1779 } else if (dc->jmp == JMP_DIRECT) {
1780 t_sync_flags(dc);
1781 gen_goto_tb(dc, 0, dc->jmp_pc);
1782 dc->is_jmp = DISAS_TB_JUMP;
1783 } else if (dc->jmp == JMP_DIRECT_CC) {
1784 int l1;
1785
1786 t_sync_flags(dc);
1787 l1 = gen_new_label();
1788 /* Conditional jmp. */
1789 tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1790 gen_goto_tb(dc, 1, dc->pc);
1791 gen_set_label(l1);
1792 gen_goto_tb(dc, 0, dc->jmp_pc);
1793
1794 dc->is_jmp = DISAS_TB_JUMP;
1795 }
1796 break;
1797 }
1798 }
 1799 if (cs->singlestep_enabled) {
 1800 break;
 1801 }
 1802 } while (!dc->is_jmp && !dc->cpustate_changed
 1803 && tcg_ctx.gen_opc_ptr < gen_opc_end
1804 && !singlestep
1805 && (dc->pc < next_page_start)
1806 && num_insns < max_insns);
1807
1808 npc = dc->pc;
 1809 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1810 if (dc->tb_flags & D_FLAG) {
1811 dc->is_jmp = DISAS_UPDATE;
1812 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1813 sync_jmpstate(dc);
1814 } else
1815 npc = dc->jmp_pc;
1816 }
1817
1818 if (tb->cflags & CF_LAST_IO)
1819 gen_io_end();
1820 /* Force an update if the per-tb cpu state has changed. */
1821 if (dc->is_jmp == DISAS_NEXT
1822 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1823 dc->is_jmp = DISAS_UPDATE;
1824 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1825 }
1826 t_sync_flags(dc);
1827
 1828 if (unlikely(cs->singlestep_enabled)) {
1829 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1830
1831 if (dc->is_jmp != DISAS_JUMP) {
 1832 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
 1833 }
 1834 gen_helper_raise_exception(cpu_env, tmp);
 1835 tcg_temp_free_i32(tmp);
1836 } else {
1837 switch(dc->is_jmp) {
1838 case DISAS_NEXT:
1839 gen_goto_tb(dc, 1, npc);
1840 break;
1841 default:
1842 case DISAS_JUMP:
1843 case DISAS_UPDATE:
1844 /* indicate that the hash table must be used
1845 to find the next TB */
1846 tcg_gen_exit_tb(0);
1847 break;
1848 case DISAS_TB_JUMP:
1849 /* nothing more to generate */
1850 break;
1851 }
1852 }
 1853 gen_tb_end(tb, num_insns);
 1854 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
 1855 if (search_pc) {
 1856 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1857 lj++;
1858 while (lj <= j)
 1859 tcg_ctx.gen_opc_instr_start[lj++] = 0;
1860 } else {
1861 tb->size = dc->pc - pc_start;
1862 tb->icount = num_insns;
1863 }
1864
1865#ifdef DEBUG_DISAS
1866#if !SIM_COMPAT
1867 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1868 qemu_log("\n");
1869#if DISAS_GNU
 1870 log_target_disas(env, pc_start, dc->pc - pc_start, 0);
 1871#endif
 1872 qemu_log("\nisize=%d osize=%td\n",
1873 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
1874 tcg_ctx.gen_opc_buf);
1875 }
1876#endif
1877#endif
1878 assert(!dc->abort_at_next_insn);
1879}
1880
 1881void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
 1882{
 1883 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
1884}
1885
 1886void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
 1887{
 1888 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
1889}
1890
1891void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1892 int flags)
 1893{
 1894 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
 1895 CPUMBState *env = &cpu->env;
1896 int i;
1897
1898 if (!env || !f)
1899 return;
1900
1901 cpu_fprintf(f, "IN: PC=%x %s\n",
1902 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
 1903 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
 1904 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
 1905 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
 1906 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1907 env->btaken, env->btarget,
1908 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1909 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1910 (env->sregs[SR_MSR] & MSR_EIP),
1911 (env->sregs[SR_MSR] & MSR_IE));
1912
1913 for (i = 0; i < 32; i++) {
1914 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1915 if ((i + 1) % 4 == 0)
1916 cpu_fprintf(f, "\n");
1917 }
1918 cpu_fprintf(f, "\n\n");
1919}
1920
 1921MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
 1922{
 1923 MicroBlazeCPU *cpu;
 1924
 1925 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
 1926
 1927 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
 1928
1929 return cpu;
1930}
 1931
1932void mb_tcg_init(void)
1933{
1934 int i;
1935
1936 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1937
1938 env_debug = tcg_global_mem_new(TCG_AREG0,
 1939 offsetof(CPUMBState, debug),
1940 "debug0");
1941 env_iflags = tcg_global_mem_new(TCG_AREG0,
 1942 offsetof(CPUMBState, iflags),
1943 "iflags");
1944 env_imm = tcg_global_mem_new(TCG_AREG0,
 1945 offsetof(CPUMBState, imm),
1946 "imm");
1947 env_btarget = tcg_global_mem_new(TCG_AREG0,
 1948 offsetof(CPUMBState, btarget),
1949 "btarget");
1950 env_btaken = tcg_global_mem_new(TCG_AREG0,
 1951 offsetof(CPUMBState, btaken),
 1952 "btaken");
1953 env_res_addr = tcg_global_mem_new(TCG_AREG0,
1954 offsetof(CPUMBState, res_addr),
1955 "res_addr");
1956 env_res_val = tcg_global_mem_new(TCG_AREG0,
1957 offsetof(CPUMBState, res_val),
1958 "res_val");
1959 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1960 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
 1961 offsetof(CPUMBState, regs[i]),
1962 regnames[i]);
1963 }
1964 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1965 cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
 1966 offsetof(CPUMBState, sregs[i]),
1967 special_regnames[i]);
1968 }
1969}
1970
 1971void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
 1972{
 1973 env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
 1974}