]> git.proxmox.com Git - qemu.git/blame - target-microblaze/translate.c
target-m68k: Don't overuse CPUState
[qemu.git] / target-microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
18 */
19
4acb54ba 20#include "cpu.h"
4acb54ba
EI
21#include "disas.h"
22#include "tcg-op.h"
23#include "helper.h"
24#include "microblaze-decode.h"
4acb54ba
EI
25
26#define GEN_HELPER 1
27#include "helper.h"
28
29#define SIM_COMPAT 0
30#define DISAS_GNU 1
31#define DISAS_MB 1
32#if DISAS_MB && !SIM_COMPAT
33# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
34#else
35# define LOG_DIS(...) do { } while (0)
36#endif
37
38#define D(x)
39
40#define EXTRACT_FIELD(src, start, end) \
41 (((src) >> start) & ((1 << (end - start + 1)) - 1))
42
43static TCGv env_debug;
44static TCGv_ptr cpu_env;
45static TCGv cpu_R[32];
46static TCGv cpu_SR[18];
47static TCGv env_imm;
48static TCGv env_btaken;
49static TCGv env_btarget;
50static TCGv env_iflags;
51
52#include "gen-icount.h"
53
/* This is the state at translation time. */
typedef struct DisasContext {
    CPUState *env;              /* CPU being translated (pre-QOM CPUState). */
    target_ulong pc;            /* PC of the insn currently being decoded. */

    /* Decoder.  Fields below are refilled for every instruction. */
    int type_b;                 /* Type-B insn: operand b is an immediate. */
    uint32_t ir;                /* Raw 32-bit instruction word. */
    uint8_t opcode;             /* Major opcode (top 6 bits). */
    uint8_t rd, ra, rb;         /* Destination and source register numbers. */
    uint16_t imm;               /* 16-bit immediate field. */

    unsigned int cpustate_changed;  /* Set when non-TB state was modified. */
    unsigned int delayed_branch;    /* Countdown for delay-slot handling. */
    unsigned int tb_flags, synced_flags; /* tb dependent flags. */
    unsigned int clear_imm;     /* Clear the IMM prefix flag after this insn. */
    int is_jmp;                 /* DISAS_* termination status for the TB. */

/* Branch state for the current TB. */
#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;           /* One of the JMP_* values above. */
    uint32_t jmp_pc;            /* Target PC for direct jumps. */

    int abort_at_next_insn;     /* Debug aid: abort before next insn. */
    int nr_nops;                /* Consecutive nops seen (sanity check). */
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
84
/* Printable names of the 32 general purpose registers, for tracing. */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

/* Printable names of the special registers (PC, MSR, ...), for tracing. */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
99
/* Sign extend at translation time.
 *
 * Treat bit 'width' (0-based) of VAL as the sign bit and propagate it
 * through the upper bits.  Implemented as a shift-up/arithmetic-shift-down
 * pair, exactly like the original LSL/ASR formulation.
 */
static inline int sign_extend(unsigned int val, unsigned int width)
{
    unsigned int shift = 31 - width;

    return (int)(val << shift) >> shift;
}
112
113static inline void t_sync_flags(DisasContext *dc)
114{
4abf79a4 115 /* Synch the tb dependent flags between translator and runtime. */
4acb54ba
EI
116 if (dc->tb_flags != dc->synced_flags) {
117 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
118 dc->synced_flags = dc->tb_flags;
119 }
120}
121
/* Emit code that raises exception INDEX at the current PC.
 * Flags are synced first so the runtime sees a consistent state, and the
 * TB is terminated (DISAS_UPDATE) since the helper does not return here.
 */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    /* Make SR_PC point at the faulting insn before entering the helper. */
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
132
/* Emit a jump to DEST, using direct TB chaining (slot N) when the target
 * lies on the same guest page as this TB; otherwise fall back to an
 * unchained exit so the target is looked up at runtime.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        /* Encode (tb, slot) for the chaining machinery. */
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
146
/* Extract the carry flag (MSR bit 31, MSR_CC) into D as 0 or 1. */
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
151
/* Write bit 0 of V into both carry copies in the MSR (MSR_C and the
 * mirrored MSR_CC), leaving all other MSR bits untouched.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    /* Smear bit 0 of v across the word, then keep only the carry bits. */
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
163
/* True if ALU operand b is a small immediate that may deserve
   faster treatment. */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  Then the 16-bit field is
       the whole operand and can be folded at translation time. */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
171
/* Return a pointer to the TCGv holding ALU operand b.
 * For type-B insns this materializes the immediate into env_imm
 * (OR-ing into a pending IMM prefix, or sign-extending the 16-bit field);
 * otherwise it is simply register rb.
 */
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
183
/* Decode add/addc/addk/addkc (and their immediate forms).
 * k = keep carry (do not update MSR), c = use carry as third addend.
 * The new carry must be computed from the *original* operands, hence the
 * gen_helper_carry call before the destination register is overwritten.
 */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first. */
    if (k) {
        /* k - keep carry, no need to update MSR. */
        /* If rd == r0, it's a nop. */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result. */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR. */
    /* Extract carry.  Without the c bit the carry-in is zero. */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        /* Compute the new carry before clobbering rd (ra may alias rd). */
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: only the carry side-effect is architecturally visible. */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}
237
/* Decode rsub (reverse subtract) and cmp/cmpu.
 * Subtraction is implemented as b + ~a + carry-in, with carry-in
 * defaulting to 1 when the c bit is clear (two's complement identity).
 * cmp/cmpu are encoded as rsubk with bit 0 of the immediate field set.
 */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first. */
    if (k) {
        /* k - keep carry, no need to update MSR. */
        /* If rd == r0, it's a nop. */
        if (dc->rd) {
            /* Note operand order: rsub computes b - a. */
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result. */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR. */
    /* Extract carry.  And complement a into na. */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c.  carry defaults to 1. */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        /* Compute the new carry before rd is clobbered. */
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: only the carry side-effect is visible. */
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
308
/* Decode the pattern-compare group: pcmpbf, pcmpeq, pcmpne.
 * Raises an illegal-opcode exception when the core was not configured
 * with C_USE_PCMP_INSTR (PVR2 bit clear).
 */
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    int l1;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        /* NOTE(review): unlike dec_mul, no return here — the insn is still
           decoded after raising the exception; presumably harmless since
           the exception helper does not return.  TODO confirm. */
    }

    mode = dc->opcode & 3;
    switch (mode) {
    case 0:
        /* pcmpbf - find first matching byte (result is 1-based index). */
        LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd)
            gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        /* pcmpeq: rd = (ra == rb). */
        LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            TCGv t0 = tcg_temp_local_new();
            l1 = gen_new_label();
            tcg_gen_movi_tl(t0, 1);
            tcg_gen_brcond_tl(TCG_COND_EQ,
                              cpu_R[dc->ra], cpu_R[dc->rb], l1);
            tcg_gen_movi_tl(t0, 0);
            gen_set_label(l1);
            tcg_gen_mov_tl(cpu_R[dc->rd], t0);
            tcg_temp_free(t0);
        }
        break;
    case 3:
        /* pcmpne: rd = (ra != rb). */
        LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        l1 = gen_new_label();
        if (dc->rd) {
            TCGv t0 = tcg_temp_local_new();
            tcg_gen_movi_tl(t0, 1);
            tcg_gen_brcond_tl(TCG_COND_NE,
                              cpu_R[dc->ra], cpu_R[dc->rb], l1);
            tcg_gen_movi_tl(t0, 0);
            gen_set_label(l1);
            tcg_gen_mov_tl(cpu_R[dc->rd], t0);
            tcg_temp_free(t0);
        }
        break;
    default:
        cpu_abort(dc->env,
                  "unsupported pattern insn opcode=%x\n", dc->opcode);
        break;
    }
}
363
/* Decode and/andn (and immediate forms).  Bit 10 of the immediate field
 * on register forms redirects to the pattern-compare group.
 */
static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    /* rd == r0 makes this a nop. */
    if (!dc->rd)
        return;

    if (not) {
        /* andn: rd = ra & ~b. */
        TCGv t = tcg_temp_new();
        tcg_gen_not_tl(t, *(dec_alu_op_b(dc)));
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], t);
        tcg_temp_free(t);
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
387
/* Decode or/ori.  Bit 10 on register forms selects the pattern group. */
static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
399
/* Decode xor/xori.  Bit 10 on register forms selects the pattern group. */
static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
411
/* Copy the current MSR into D. */
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
416
417static inline void msr_write(DisasContext *dc, TCGv v)
418{
97b833c5
EI
419 TCGv t;
420
421 t = tcg_temp_new();
4acb54ba 422 dc->cpustate_changed = 1;
97b833c5 423 /* PVR bit is not writable. */
8a84fc6b
EI
424 tcg_gen_andi_tl(t, v, ~MSR_PVR);
425 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
97b833c5
EI
426 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
427 tcg_temp_free(t);
4acb54ba
EI
428}
429
/* Decode the MSR access group: msrclr/msrset and mts/mfs (move to/from
 * special register).  Privileged forms raise ESR_EC_PRIVINSN in user mode
 * when exceptions are enabled.
 */
static void dec_msr(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(dc->env);

    sr = dc->imm & ((1 << 14) - 1);    /* Special register number. */
    to = dc->imm & (1 << 14);          /* Set: mts, clear: mfs. */
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset. */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        /* Only the C (and no-op) masks are allowed from user mode. */
        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        /* rd receives the pre-modification MSR. */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        /* MSR changed: stop the TB so new flags take effect. */
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        /* mts is privileged. */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block. */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
        case 0:
            /* Writes to rpc are ignored. */
            break;
        case 1:
            msr_write(dc, cpu_R[dc->ra]);
            break;
        case 0x3:
            tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
            break;
        case 0x5:
            tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
            break;
        case 0x7:
            /* Only the low 5 FSR bits are writable. */
            tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
            break;
        case 0x800:
            /* Stack low limit register (stack protection). */
            tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUState, slr));
            break;
        case 0x802:
            /* Stack high limit register. */
            tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUState, shr));
            break;
        default:
            cpu_abort(dc->env, "unknown mts reg %x\n", sr);
            break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
        case 0:
            /* mfs from rpc returns the PC of this insn. */
            tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
            break;
        case 1:
            msr_read(dc, cpu_R[dc->rd]);
            break;
        case 0x3:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
            break;
        case 0x5:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
            break;
        case 0x7:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
            break;
        case 0xb:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
            break;
        case 0x800:
            tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUState, slr));
            break;
        case 0x802:
            tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUState, shr));
            break;
        case 0x2000:
        case 0x2001:
        case 0x2002:
        case 0x2003:
        case 0x2004:
        case 0x2005:
        case 0x2006:
        case 0x2007:
        case 0x2008:
        case 0x2009:
        case 0x200a:
        case 0x200b:
        case 0x200c:
            /* PVR registers 0..12. */
            rn = sr & 0xf;
            tcg_gen_ld_tl(cpu_R[dc->rd],
                          cpu_env, offsetof(CPUState, pvr.regs[rn]));
            break;
        default:
            cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
            break;
        }
    }

    /* r0 must stay zero even if it was used as a destination above. */
    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}
586
/* 64-bit signed mul, lower result in d and upper in d2. */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* Sign-extend both 32-bit operands to 64 bits before multiplying. */
    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    /* Split the 64-bit product into low (d) and high (d2) halves. */
    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
606
/* 64-bit unsigned muls, lower result in d and upper in d2. */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* Zero-extend both 32-bit operands to 64 bits before multiplying. */
    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    /* Split the 64-bit product into low (d) and high (d2) halves. */
    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
626
627/* Multiplier unit. */
628static void dec_mul(DisasContext *dc)
629{
630 TCGv d[2];
631 unsigned int subcode;
632
1567a005 633 if ((dc->tb_flags & MSR_EE_FLAG)
97f90cbf 634 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1567a005
EI
635 && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
636 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
637 t_gen_raise_exception(dc, EXCP_HW_EXCP);
638 return;
639 }
640
4acb54ba
EI
641 subcode = dc->imm & 3;
642 d[0] = tcg_temp_new();
643 d[1] = tcg_temp_new();
644
645 if (dc->type_b) {
646 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
647 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
648 goto done;
649 }
650
1567a005
EI
651 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
652 if (subcode >= 1 && subcode <= 3
653 && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
654 /* nop??? */
655 }
656
4acb54ba
EI
657 switch (subcode) {
658 case 0:
659 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
660 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
661 break;
662 case 1:
663 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
664 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
665 break;
666 case 2:
667 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
668 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
669 break;
670 case 3:
671 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
672 t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
673 break;
674 default:
675 cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
676 break;
677 }
678done:
679 tcg_temp_free(d[0]);
680 tcg_temp_free(d[1]);
681}
682
/* Div unit.  Decodes idiv/idivu; raises illegal-opcode when the core has
 * no hardware divider.
 */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;    /* Unsigned divide. */
    LOG_DIS("div\n");

    if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        /* NOTE(review): no return after raising — decode continues below.
           Presumably benign as the exception helper does not return;
           TODO confirm against sibling decoders which do return. */
    }

    /* Note operand order: idiv computes b / a. */
    if (u)
        gen_helper_divu(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
    /* Keep r0 zero if it was the destination. */
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}
704
/* Barrel shifter: bsll/bsrl/bsra (and immediate forms).
 * s selects shift-left, t selects arithmetic (vs logical) right shift.
 * Raises illegal-opcode when the core has no barrel shifter.
 */
static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    /* Shift amount is operand b modulo 32. */
    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}
738
/* Decode the bit-field group: src/srl/sra shifts-by-one, sign extensions,
 * cache ops (wdc/wic), and clz.  Cache ops are privileged.
 */
static void dec_bit(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int op;
    int mem_index = cpu_mmu_index(dc->env);

    op = dc->ir & ((1 << 8) - 1);
    switch (op) {
    case 0x21:
        /* src - shift right through carry. */
        t0 = tcg_temp_new();

        LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
        /* Save outgoing carry (old bit 0) before shifting. */
        tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);
        if (dc->rd) {
            t1 = tcg_temp_new();
            read_carry(dc, t1);
            tcg_gen_shli_tl(t1, t1, 31);

            tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t1);
            tcg_temp_free(t1);
        }

        /* Update carry. */
        write_carry(dc, t0);
        tcg_temp_free(t0);
        break;

    case 0x1:
    case 0x41:
        /* srl (0x41, logical) / sra (0x1, arithmetic), shift by one. */
        t0 = tcg_temp_new();
        LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

        /* Update carry. */
        tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);
        write_carry(dc, t0);
        tcg_temp_free(t0);
        if (dc->rd) {
            if (op == 0x41)
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            else
                tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
        }
        break;
    case 0x60:
        LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x61:
        LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x64:
    case 0x66:
    case 0x74:
    case 0x76:
        /* wdc - write to data cache; modeled as a privileged nop. */
        LOG_DIS("wdc r%d\n", dc->ra);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
        break;
    case 0x68:
        /* wic - write to instruction cache; modeled as a privileged nop. */
        LOG_DIS("wic r%d\n", dc->ra);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
        break;
    case 0xe0:
        /* clz - count leading zeros; gated on the PCMP feature bit. */
        if ((dc->tb_flags & MSR_EE_FLAG)
            && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
            && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
            gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
        }
        break;
    default:
        cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                  dc->pc, op, dc->rd, dc->ra, dc->rb);
        break;
    }
}
833
/* Materialize pending direct-branch state into the runtime variables
 * (env_btaken/env_btarget) so a fault inside a delay slot sees a
 * consistent branch state.  Converts the branch to JMP_INDIRECT.
 */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            /* Unconditional direct branches are always taken. */
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
844
/* imm prefix: load the upper 16 bits of the next insn's immediate into
 * env_imm and flag it so the following type-B insn ORs in its low half.
 */
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    /* Don't clear the IMM flag at the end of this insn. */
    dc->clear_imm = 0;
}
852
/* Emit a zero-extending guest load of SIZE bytes (1, 2 or 4) from ADDR
 * into DST, using the current MMU index.
 */
static inline void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
                            unsigned int size)
{
    int mem_index = cpu_mmu_index(dc->env);

    if (size == 1) {
        tcg_gen_qemu_ld8u(dst, addr, mem_index);
    } else if (size == 2) {
        tcg_gen_qemu_ld16u(dst, addr, mem_index);
    } else if (size == 4) {
        tcg_gen_qemu_ld32u(dst, addr, mem_index);
    } else
        cpu_abort(dc->env, "Incorrect load size %d\n", size);
}
867
/* Compute the effective address for a load/store.
 * Returns either a pointer directly to a register TCGv (when one operand
 * is r0 or the immediate is zero — no temp needed) or to *T, a freshly
 * allocated temp the caller must free.  When r1 (the stack pointer) is
 * involved, the stackprot helper validates the address.
 */
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores. */
    int stackprot = 0;

    /* All load/stores use ra. */
    if (dc->ra == 1) {
        stackprot = 1;
    }

    /* Treat the common cases first. */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other. */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(*t);
        }
        return t;
    }
    /* Immediate. */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(*t);
    }
    return t;
}
918
/* Byteswap SRC into DST for reversed (little-endian) accesses.
 * Size 4 is a full 32-bit swap, size 2 swaps within the low halfword;
 * size 1 needs no data swap (only the address is reversed) and is ignored.
 */
static inline void dec_byteswap(DisasContext *dc, TCGv dst, TCGv src, int size)
{
    if (size == 4) {
        tcg_gen_bswap32_tl(dst, src);
    } else if (size == 2) {
        TCGv t = tcg_temp_new();

        /* bswap16 assumes the high bits are zero. */
        tcg_gen_andi_tl(t, src, 0xffff);
        tcg_gen_bswap16_tl(dst, t);
        tcg_temp_free(t);
    } else {
        /* Ignore.
        cpu_abort(dc->env, "Invalid ldst byteswap size %d\n", size);
        */
    }
}
936
4acb54ba
EI
/* Decode load instructions (lbu/lhu/lw and reversed/immediate variants).
 *
 * Reversed (lwr/lhr/lbr) accesses swap the address within a word for
 * sub-word sizes and byteswap the loaded data.  When unaligned-access
 * exceptions are configured, the load is done speculatively into a temp
 * first because MMU faults take priority over alignment faults.
 */
static void dec_load(DisasContext *dc)
{
    TCGv t, *addr;
    unsigned int size, rev = 0;

    size = 1 << (dc->opcode & 3);

    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr. */
        switch (size) {
        case 1:
        {
            /* 00 -> 11
               01 -> 10
               10 -> 10
               11 -> 00 */
            TCGv low = tcg_temp_new();

            /* Force addr into the temp. */
            if (addr != &t) {
                t = tcg_temp_new();
                tcg_gen_mov_tl(t, *addr);
                addr = &t;
            }

            /* addr = (addr & ~3) | (3 - (addr & 3)). */
            tcg_gen_andi_tl(low, t, 3);
            tcg_gen_sub_tl(low, tcg_const_tl(3), low);
            tcg_gen_andi_tl(t, t, ~3);
            tcg_gen_or_tl(t, t, low);
            tcg_gen_mov_tl(env_imm, t);
            tcg_temp_free(low);
            break;
        }

        case 2:
            /* 00 -> 10
               10 -> 00. */
            /* Force addr into the temp. */
            if (addr != &t) {
                t = tcg_temp_new();
                tcg_gen_xori_tl(t, *addr, 2);
                addr = &t;
            } else {
                tcg_gen_xori_tl(t, t, 2);
            }
            break;
        default:
            cpu_abort(dc->env, "Invalid reverse size\n");
            break;
        }
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync. */
    sync_jmpstate(dc);

    /* Verify alignment if needed. */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        TCGv v = tcg_temp_new();

        /*
         * Microblaze gives MMU faults priority over faults due to
         * unaligned addresses. That's why we speculatively do the load
         * into v. If the load succeeds, we verify alignment of the
         * address and if that succeeds we write into the destination reg.
         */
        gen_load(dc, v, *addr, size);

        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
        if (dc->rd) {
            if (rev) {
                dec_byteswap(dc, cpu_R[dc->rd], v, size);
            } else {
                tcg_gen_mov_tl(cpu_R[dc->rd], v);
            }
        }
        tcg_temp_free(v);
    } else {
        if (dc->rd) {
            gen_load(dc, cpu_R[dc->rd], *addr, size);
            if (rev) {
                dec_byteswap(dc, cpu_R[dc->rd], cpu_R[dc->rd], size);
            }
        } else {
            /* We are loading into r0, no need to reverse. */
            gen_load(dc, env_imm, *addr, size);
        }
    }

    if (addr == &t)
        tcg_temp_free(t);
}
1052
/* Emit a guest store of SIZE bytes (1, 2 or 4) of VAL to ADDR,
 * using the current MMU index.
 */
static void gen_store(DisasContext *dc, TCGv addr, TCGv val,
                      unsigned int size)
{
    int mem_index = cpu_mmu_index(dc->env);

    if (size == 1)
        tcg_gen_qemu_st8(val, addr, mem_index);
    else if (size == 2) {
        tcg_gen_qemu_st16(val, addr, mem_index);
    } else if (size == 4) {
        tcg_gen_qemu_st32(val, addr, mem_index);
    } else
        cpu_abort(dc->env, "Incorrect store size %d\n", size);
}
1067
/* Decode store instructions (sb/sh/sw and reversed/immediate variants).
 *
 * Mirrors dec_load: reversed stores swap the address for sub-word sizes
 * and byteswap the data before storing.  Alignment is checked after the
 * store has been emitted (see FIXME below on restoring memory).
 */
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr;
    unsigned int size, rev = 0;

    size = 1 << (dc->opcode & 3);
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync. */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr. */
        switch (size) {
        case 1:
        {
            /* 00 -> 11
               01 -> 10
               10 -> 10
               11 -> 00 */
            TCGv low = tcg_temp_new();

            /* Force addr into the temp. */
            if (addr != &t) {
                t = tcg_temp_new();
                tcg_gen_mov_tl(t, *addr);
                addr = &t;
            }

            /* addr = (addr & ~3) | (3 - (addr & 3)). */
            tcg_gen_andi_tl(low, t, 3);
            tcg_gen_sub_tl(low, tcg_const_tl(3), low);
            tcg_gen_andi_tl(t, t, ~3);
            tcg_gen_or_tl(t, t, low);
            tcg_gen_mov_tl(env_imm, t);
            tcg_temp_free(low);
            break;
        }

        case 2:
            /* 00 -> 10
               10 -> 00. */
            /* Force addr into the temp. */
            if (addr != &t) {
                t = tcg_temp_new();
                tcg_gen_xori_tl(t, *addr, 2);
                addr = &t;
            } else {
                tcg_gen_xori_tl(t, t, 2);
            }
            break;
        default:
            cpu_abort(dc->env, "Invalid reverse size\n");
            break;
        }

        /* Byteswap the data for sub-word reversed stores (size 2 here;
           byte stores need no data swap). */
        if (size != 1) {
            TCGv bs_data = tcg_temp_new();
            dec_byteswap(dc, bs_data, cpu_R[dc->rd], size);
            gen_store(dc, *addr, bs_data, size);
            tcg_temp_free(bs_data);
        } else {
            gen_store(dc, *addr, cpu_R[dc->rd], size);
        }
    } else {
        if (rev) {
            TCGv bs_data = tcg_temp_new();
            dec_byteswap(dc, bs_data, cpu_R[dc->rd], size);
            gen_store(dc, *addr, bs_data, size);
            tcg_temp_free(bs_data);
        } else {
            gen_store(dc, *addr, cpu_R[dc->rd], size);
        }
    }

    /* Verify alignment if needed. */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, thay way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (addr == &t)
        tcg_temp_free(t);
}
1170
/* Evaluate condition code CC over (A, B) and set D to 0/1.
 * Maps the branch condition field directly onto TCG setcond conditions.
 */
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
    case CC_EQ:
        tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
        break;
    case CC_NE:
        tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
        break;
    case CC_LT:
        tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
        break;
    case CC_LE:
        tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
        break;
    case CC_GE:
        tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
        break;
    case CC_GT:
        tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
        break;
    default:
        cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
        break;
    }
}
1198
1199static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1200{
1201 int l1;
1202
1203 l1 = gen_new_label();
1204 /* Conditional jmp. */
1205 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1206 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1207 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1208 gen_set_label(l1);
1209}
1210
/* Decode conditional branches (beq/bne/blt/ble/bge/bgt, optionally with
 * a delay slot).  Computes env_btarget and env_btaken; the actual PC
 * update is emitted by the main loop once the delay slot (if any) has
 * been translated.  */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        /* Two insns remain: the branch itself plus the delay slot.  */
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Record whether the branch was imm-prefixed so state can be
           reconstructed if the delay slot traps.  */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        /* Target is known at translation time: allow direct chaining.  */
        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        /* Register (or wide-imm) relative target, resolved at runtime.  */
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    /* btaken = (ra cc 0).  */
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
1241
/* Decode unconditional branches (br/bra/brl/brk variants) and the mbar
 * encoding that shares this opcode space.  */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(dc->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Record whether the branch was imm-prefixed so state can be
           reconstructed if the delay slot traps.  */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUState, bimm));
    }
    /* Link: save the return address (the branch insn's own PC).  */
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            /* brki with imm 8/0x18 is the software-breakpoint ABI.  */
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                /* brki to vector 0 is privileged.  */
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            /* PC-relative target known at translation time.  */
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}
1303
1304static inline void do_rti(DisasContext *dc)
1305{
1306 TCGv t0, t1;
1307 t0 = tcg_temp_new();
1308 t1 = tcg_temp_new();
1309 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1310 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1311 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1312
1313 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1314 tcg_gen_or_tl(t1, t1, t0);
1315 msr_write(dc, t1);
1316 tcg_temp_free(t1);
1317 tcg_temp_free(t0);
1318 dc->tb_flags &= ~DRTI_FLAG;
1319}
1320
1321static inline void do_rtb(DisasContext *dc)
1322{
1323 TCGv t0, t1;
1324 t0 = tcg_temp_new();
1325 t1 = tcg_temp_new();
1326 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1327 tcg_gen_shri_tl(t0, t1, 1);
1328 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1329
1330 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1331 tcg_gen_or_tl(t1, t1, t0);
1332 msr_write(dc, t1);
1333 tcg_temp_free(t1);
1334 tcg_temp_free(t0);
1335 dc->tb_flags &= ~DRTB_FLAG;
1336}
1337
1338static inline void do_rte(DisasContext *dc)
1339{
1340 TCGv t0, t1;
1341 t0 = tcg_temp_new();
1342 t1 = tcg_temp_new();
1343
1344 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1345 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1346 tcg_gen_shri_tl(t0, t1, 1);
1347 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1348
1349 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1350 tcg_gen_or_tl(t1, t1, t0);
1351 msr_write(dc, t1);
1352 tcg_temp_free(t1);
1353 tcg_temp_free(t0);
1354 dc->tb_flags &= ~DRTE_FLAG;
1355}
1356
/* Decode the return family: rtsd (plain), rtid (interrupt), rtbd
 * (break), rted (exception).  All have a mandatory delay slot; the
 * target is ra + op_b.  The MSR restore work is deferred via the
 * DRTI/DRTB/DRTE flags and performed after the delay slot.  */
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(dc->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    /* Delay slot insn executes before the jump takes effect.  */
    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    /* Record whether this insn was imm-prefixed so state can be
       reconstructed if the delay slot traps.  */
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        /* rtid is privileged: raise a priv-insn trap from user mode.
           NOTE(review): unlike dec_br, no return after raising here;
           the DRTI flag is still set — presumably intentional since
           the exception preempts the TB.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        /* rtbd is privileged as well.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        /* rted is privileged as well.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    /* The return is always taken; target computed at runtime.  */
    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
1402
97694c57
EI
1403static int dec_check_fpuv2(DisasContext *dc)
1404{
1405 int r;
1406
1407 r = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK;
1408
1409 if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
1410 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1411 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1412 }
1413 return r;
1414}
1415
1567a005
EI
/* Decode the FPU opcode group.  Dispatches to the fadd/frsub/fmul/fdiv,
 * fcmp.* and (FPU v2 only) flt/fint/fsqrt helpers.  */
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    /* No FPU configured: raise illegal-opcode when so enabled.  */
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    /* Minor opcode selecting the FPU operation.  */
    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 4:
            /* Comparison variant selected by a secondary field.  */
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log ("unimplemented fcmp fpu_insn=%x pc=%x opc=%x\n",
                              fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            /* flt needs FPU v2.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;

        case 6:
            /* fint needs FPU v2.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;

        case 7:
            /* fsqrt needs FPU v2.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;

        default:
            qemu_log ("unimplemented FPU insn fpu_insn=%x pc=%x opc=%x\n",
                      fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}
1513
4acb54ba
EI
1514static void dec_null(DisasContext *dc)
1515{
02b33596
EI
1516 if ((dc->tb_flags & MSR_EE_FLAG)
1517 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1518 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1519 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1520 return;
1521 }
4acb54ba
EI
1522 qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1523 dc->abort_at_next_insn = 1;
1524}
1525
6d76d23e
EI
/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(dc->env);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    /* Stream accesses are privileged.  */
    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        /* Immediate form: channel id and control bits are in imm.  */
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        /* Dynamic form: channel id comes from rb at runtime.  */
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    /* rd == 0 selects "put" (write to stream), otherwise "get".  */
    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}
1561
4acb54ba
EI
/* First-level decode table.  decode() matches entries in order against
 * the major opcode (opcode & mask == bits); the final catch-all entry
 * (mask 0) routes unknown opcodes to dec_null, so table order matters.  */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1589
/* Fetch and decode one instruction at dc->pc, dispatching through the
 * decinfo table.  Also implements the zero-opcode (nop-sequence)
 * sanity checks.  */
static inline void decode(DisasContext *dc)
{
    uint32_t ir;
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(dc->pc);

    dc->ir = ir = ldl_code(dc->pc);
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        /* All-zero insn: may be configured to trap as illegal.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        /* Guard against running off into zeroed memory.  */
        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4)
            cpu_abort(dc->env, "fetching nop sequence\n");
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    /* Crack the fixed fields of the insn word.  */
    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  First matching entry wins.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
1634
4acb54ba
EI
1635static void check_breakpoint(CPUState *env, DisasContext *dc)
1636{
1637 CPUBreakpoint *bp;
1638
72cf2d4f
BS
1639 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1640 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4acb54ba
EI
1641 if (bp->pc == dc->pc) {
1642 t_gen_raise_exception(dc, EXCP_DEBUG);
1643 dc->is_jmp = DISAS_UPDATE;
1644 }
1645 }
1646 }
1647}
1648
/* generate intermediate code for basic block 'tb'.  When search_pc is
 * set, additionally record the opc-index -> guest-PC mapping used by
 * restore_state_to_opc().  */
static void
gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
                               int search_pc)
{
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    qemu_log_try_set_file(stderr);

    pc_start = tb->pc;
    dc->env = env;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* A TB may start inside a delay slot (D_FLAG carried in tb->flags).  */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3)
        cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(env, 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do
    {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            /* Record the opc-index -> PC mapping for this insn.  */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        /* The decoder clears clear_imm when the insn was imm-prefixed.  */
        dc->clear_imm = 1;
        decode(dc);
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* Delay slot done: perform any deferred MSR restore.  */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    int l1;

                    t_sync_flags(dc);
                    l1 = gen_new_label();
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (env->singlestep_enabled)
            break;
    } while (!dc->is_jmp && !dc->cpustate_changed
         && gen_opc_ptr < gen_opc_end
                 && !singlestep
         && (dc->pc < next_page_start)
                 && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* TB ends between a branch and its delay slot; the jump
               state must be synced to memory for the next TB.  */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(env->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the tail of the opc-index map.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
            dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1852
/* Normal translation entry point: no PC search info recorded.  */
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
1857
/* Translation recording the opc-index -> PC map consumed by
 * restore_state_to_opc().  */
void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
1862
/* Dump CPU registers (PC, MSR-related state and r0..r31) to f.
 * flags is currently unused.  */
void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                     int flags)
{
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
             env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
             env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
             env->btaken, env->btarget,
             (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_EIP),
             (env->sregs[SR_MSR] & MSR_IE));

    /* General-purpose registers, four per row.  */
    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}
1890
/* Allocate, reset and initialize a MicroBlaze CPU.  On the first call
 * this also registers the TCG globals that map guest registers onto
 * CPUState fields.  cpu_model is currently unused.  */
CPUState *cpu_mb_init (const char *cpu_model)
{
    CPUState *env;
    static int tcg_initialized = 0;
    int i;

    env = g_malloc0(sizeof(CPUState));

    cpu_exec_init(env);
    cpu_state_reset(env);
    qemu_init_vcpu(env);
    set_float_rounding_mode(float_round_nearest_even, &env->fp_status);

    /* The TCG globals below are process-wide: create them only once.  */
    if (tcg_initialized)
        return env;

    tcg_initialized = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUState, btaken),
                     "btaken");
    /* General-purpose and special registers live in CPUState arrays.  */
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUState, sregs[i]),
                          special_regnames[i]);
    }
/* Pull in the generated helper registration stubs.  */
#define GEN_HELPER 2
#include "helper.h"

    return env;
}
1941
/* Reset the CPU to its power-on state: clear registers, flush the TLB
 * and program the default PVR (processor version register) config.  */
void cpu_state_reset(CPUState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    /* Zero everything up to (but excluding) the breakpoint lists.  */
    memset(env, 0, offsetof(CPUMBState, breakpoints));
    tlb_flush(env, 1);

    /* Disable stack protector.  */
    env->shr = ~0;

    /* Advertise a fully-featured core in the PVR.  */
    env->pvr.regs[0] = PVR0_PVR_FULL_MASK \
                       | PVR0_USE_BARREL_MASK \
                       | PVR0_USE_DIV_MASK \
                       | PVR0_USE_HW_MUL_MASK \
                       | PVR0_USE_EXC_MASK \
                       | PVR0_USE_ICACHE_MASK \
                       | PVR0_USE_DCACHE_MASK \
                       | PVR0_USE_MMU \
                       | (0xb << 8);
    env->pvr.regs[2] = PVR2_D_OPB_MASK \
                        | PVR2_D_LMB_MASK \
                        | PVR2_I_OPB_MASK \
                        | PVR2_I_LMB_MASK \
                        | PVR2_USE_MSR_INSTR \
                        | PVR2_USE_PCMP_INSTR \
                        | PVR2_USE_BARREL_MASK \
                        | PVR2_USE_DIV_MASK \
                        | PVR2_USE_HW_MUL_MASK \
                        | PVR2_USE_MUL64_MASK \
                        | PVR2_USE_FPU_MASK \
                        | PVR2_USE_FPU2_MASK \
                        | PVR2_FPU_EXC_MASK \
                        | 0;
    env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family.  */
    env->pvr.regs[11] = PVR11_USE_MMU | (16 << 17);

#if defined(CONFIG_USER_ONLY)
    /* start in user mode with interrupts enabled.  */
    env->sregs[SR_MSR] = MSR_EE | MSR_IE | MSR_VM | MSR_UM;
    env->pvr.regs[10] = 0x0c000000; /* Spartan 3a dsp.  */
#else
    env->sregs[SR_MSR] = 0;
    mmu_init(&env->mmu);
    env->mmu.c_mmu = 3;
    env->mmu.c_mmu_tlb_access = 3;
    env->mmu.c_mmu_zones = 16;
#endif
}
1993
/* Restore SR_PC from the guest PC recorded for opc index pc_pos
 * (the map filled in by gen_intermediate_code_pc).  */
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->sregs[SR_PC] = gen_opc_pc[pc_pos];
}