/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/helper-gen.h"

#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
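/* For example, the main decoder below uses EXTRACT_FIELD(ir, 26, 31) to pull
   the 6-bit opcode out of an instruction word and EXTRACT_FIELD(ir, 0, 15)
   for the 16-bit immediate. */
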
static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

/* Sign extend at translation time.  */
static inline int sign_extend(unsigned int val, unsigned int width)
{
    int sval;

    /* LSL.  */
    val <<= 31 - width;
    sval = val;
    /* ASR.  */
    sval >>= 31 - width;
    return sval;
}

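/* The translator keeps a private copy of the iflags in dc->tb_flags and only
   writes it back to env_iflags lazily, when it has actually changed; see the
   t_sync_flags calls before raising exceptions and before loads/stores. */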
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

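/* Emit the end-of-TB sequence for a jump to dest.  Direct TB chaining via
   goto_tb is only used when the destination lies on the same guest page as
   the current TB; otherwise we exit with SR_PC updated and let the main
   loop look up the next TB. */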
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

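/* Carry helpers.  The carry flag is held in MSR as the MSR_C/MSR_CC bits;
   read_carry extracts it from bit 31 of MSR, while write_carry replicates
   bit 0 of its argument into both carry bits. */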
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

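/* Operand b of an ALU insn is either register rb or, for type B (immediate)
   insns, the 16-bit immediate sign-extended into env_imm.  If a preceding imm
   prefix has already loaded the upper half (IMM_FLAG set), only the low bits
   are ORed in here. */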
/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

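/* add/addc/addk/addkc and their immediate forms.  Bit 2 of the opcode (k)
   means "keep carry" (MSR is left alone), bit 1 (c) adds the current carry
   into the result.  When MSR must be updated, gen_helper_carry computes the
   carry-out before the destination register is overwritten. */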
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

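/* rsub and friends: d = b - a, implemented as b + ~a + carry with the carry
   defaulting to 1.  The cmp/cmpu variants (register form with k set and bit 0
   of the immediate set) instead produce a signed/unsigned comparison result
   via helpers. */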
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c.  carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

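/* Pattern-compare unit (pcmpbf/pcmpeq/pcmpne).  These insns are optional
   hardware; if the configured PVR says they are absent and illegal-opcode
   exceptions are enabled, raise ESR_EC_ILLEGAL_OP instead of decoding. */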
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    int l1;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                l1 = gen_new_label();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_EQ,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            l1 = gen_new_label();
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_NE,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

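/* Logical ops.  For the register forms, bit 10 of the immediate field
   redirects decoding to the pattern-compare unit above, so dec_and, dec_or
   and dec_xor each check it first. */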
static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

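/* MSR accessors.  msr_write must leave the read-only PVR bit of MSR
   untouched, so the incoming value is masked with ~MSR_PVR before being
   merged with the preserved bit. */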
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    /* Merge the masked value so the read-only PVR bit cannot be set from v.  */
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

440static void dec_msr(DisasContext *dc)
441{
0063ebd6 442 CPUState *cs = CPU(dc->cpu);
4acb54ba
EI
443 TCGv t0, t1;
444 unsigned int sr, to, rn;
0063ebd6 445 int mem_index = cpu_mmu_index(&dc->cpu->env);
4acb54ba
EI
446
447 sr = dc->imm & ((1 << 14) - 1);
448 to = dc->imm & (1 << 14);
449 dc->type_b = 1;
450 if (to)
451 dc->cpustate_changed = 1;
452
453 /* msrclr and msrset. */
454 if (!(dc->imm & (1 << 15))) {
455 unsigned int clr = dc->ir & (1 << 16);
456
457 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
458 dc->rd, dc->imm);
1567a005 459
0063ebd6 460 if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
1567a005
EI
461 /* nop??? */
462 return;
463 }
464
465 if ((dc->tb_flags & MSR_EE_FLAG)
466 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
467 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
468 t_gen_raise_exception(dc, EXCP_HW_EXCP);
469 return;
470 }
471
4acb54ba
EI
472 if (dc->rd)
473 msr_read(dc, cpu_R[dc->rd]);
474
475 t0 = tcg_temp_new();
476 t1 = tcg_temp_new();
477 msr_read(dc, t0);
478 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
479
480 if (clr) {
481 tcg_gen_not_tl(t1, t1);
482 tcg_gen_and_tl(t0, t0, t1);
483 } else
484 tcg_gen_or_tl(t0, t0, t1);
485 msr_write(dc, t0);
486 tcg_temp_free(t0);
487 tcg_temp_free(t1);
488 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
489 dc->is_jmp = DISAS_UPDATE;
490 return;
491 }
492
1567a005
EI
493 if (to) {
494 if ((dc->tb_flags & MSR_EE_FLAG)
495 && mem_index == MMU_USER_IDX) {
496 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
497 t_gen_raise_exception(dc, EXCP_HW_EXCP);
498 return;
499 }
500 }
501
4acb54ba
EI
502#if !defined(CONFIG_USER_ONLY)
503 /* Catch read/writes to the mmu block. */
504 if ((sr & ~0xff) == 0x1000) {
505 sr &= 7;
506 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
507 if (to)
64254eba 508 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
4acb54ba 509 else
64254eba 510 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
4acb54ba
EI
511 return;
512 }
513#endif
514
515 if (to) {
516 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
517 switch (sr) {
518 case 0:
519 break;
520 case 1:
521 msr_write(dc, cpu_R[dc->ra]);
522 break;
523 case 0x3:
524 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
525 break;
526 case 0x5:
527 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
528 break;
529 case 0x7:
97694c57 530 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
4acb54ba 531 break;
5818dee5 532 case 0x800:
68cee38a 533 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
534 break;
535 case 0x802:
68cee38a 536 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
5818dee5 537 break;
4acb54ba 538 default:
0063ebd6 539 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
4acb54ba
EI
540 break;
541 }
542 } else {
543 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
544
545 switch (sr) {
546 case 0:
547 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
548 break;
549 case 1:
550 msr_read(dc, cpu_R[dc->rd]);
551 break;
552 case 0x3:
553 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
554 break;
555 case 0x5:
556 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
557 break;
558 case 0x7:
97694c57 559 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
4acb54ba
EI
560 break;
561 case 0xb:
562 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
563 break;
5818dee5 564 case 0x800:
68cee38a 565 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
566 break;
567 case 0x802:
68cee38a 568 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
5818dee5 569 break;
4acb54ba
EI
570 case 0x2000:
571 case 0x2001:
572 case 0x2002:
573 case 0x2003:
574 case 0x2004:
575 case 0x2005:
576 case 0x2006:
577 case 0x2007:
578 case 0x2008:
579 case 0x2009:
580 case 0x200a:
581 case 0x200b:
582 case 0x200c:
583 rn = sr & 0xf;
584 tcg_gen_ld_tl(cpu_R[dc->rd],
68cee38a 585 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
4acb54ba
EI
586 break;
587 default:
a47dddd7 588 cpu_abort(cs, "unknown mfs reg %x\n", sr);
4acb54ba
EI
589 break;
590 }
591 }
ee7dbcf8
EI
592
593 if (dc->rd == 0) {
594 tcg_gen_movi_tl(cpu_R[0], 0);
595 }
4acb54ba
EI
596}
597
598/* 64-bit signed mul, lower result in d and upper in d2. */
599static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
600{
601 TCGv_i64 t0, t1;
602
603 t0 = tcg_temp_new_i64();
604 t1 = tcg_temp_new_i64();
605
606 tcg_gen_ext_i32_i64(t0, a);
607 tcg_gen_ext_i32_i64(t1, b);
608 tcg_gen_mul_i64(t0, t0, t1);
609
610 tcg_gen_trunc_i64_i32(d, t0);
611 tcg_gen_shri_i64(t0, t0, 32);
612 tcg_gen_trunc_i64_i32(d2, t0);
613
614 tcg_temp_free_i64(t0);
615 tcg_temp_free_i64(t1);
616}
617
618/* 64-bit unsigned muls, lower result in d and upper in d2. */
619static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
620{
621 TCGv_i64 t0, t1;
622
623 t0 = tcg_temp_new_i64();
624 t1 = tcg_temp_new_i64();
625
626 tcg_gen_extu_i32_i64(t0, a);
627 tcg_gen_extu_i32_i64(t1, b);
628 tcg_gen_mul_i64(t0, t0, t1);
629
630 tcg_gen_trunc_i64_i32(d, t0);
631 tcg_gen_shri_i64(t0, t0, 32);
632 tcg_gen_trunc_i64_i32(d2, t0);
633
634 tcg_temp_free_i64(t0);
635 tcg_temp_free_i64(t1);
636}
637
638/* Multiplier unit. */
639static void dec_mul(DisasContext *dc)
640{
641 TCGv d[2];
642 unsigned int subcode;
643
1567a005 644 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6
AF
645 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
646 && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
1567a005
EI
647 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
648 t_gen_raise_exception(dc, EXCP_HW_EXCP);
649 return;
650 }
651
4acb54ba
EI
652 subcode = dc->imm & 3;
653 d[0] = tcg_temp_new();
654 d[1] = tcg_temp_new();
655
656 if (dc->type_b) {
657 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
658 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
659 goto done;
660 }
661
1567a005
EI
662 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
663 if (subcode >= 1 && subcode <= 3
0063ebd6 664 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
1567a005
EI
665 /* nop??? */
666 }
667
4acb54ba
EI
668 switch (subcode) {
669 case 0:
670 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
671 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
672 break;
673 case 1:
674 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
675 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
676 break;
677 case 2:
678 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
679 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
680 break;
681 case 3:
682 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
683 t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
684 break;
685 default:
0063ebd6 686 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
4acb54ba
EI
687 break;
688 }
689done:
690 tcg_temp_free(d[0]);
691 tcg_temp_free(d[1]);
692}
693
694/* Div unit. */
695static void dec_div(DisasContext *dc)
696{
697 unsigned int u;
698
699 u = dc->imm & 2;
700 LOG_DIS("div\n");
701
0063ebd6
AF
702 if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
703 && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
1567a005
EI
704 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
705 t_gen_raise_exception(dc, EXCP_HW_EXCP);
706 }
707
4acb54ba 708 if (u)
64254eba
BS
709 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
710 cpu_R[dc->ra]);
4acb54ba 711 else
64254eba
BS
712 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
713 cpu_R[dc->ra]);
4acb54ba
EI
714 if (!dc->rd)
715 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
716}
717
718static void dec_barrel(DisasContext *dc)
719{
720 TCGv t0;
721 unsigned int s, t;
722
1567a005 723 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6
AF
724 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
725 && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
1567a005
EI
726 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
727 t_gen_raise_exception(dc, EXCP_HW_EXCP);
728 return;
729 }
730
4acb54ba
EI
731 s = dc->imm & (1 << 10);
732 t = dc->imm & (1 << 9);
733
734 LOG_DIS("bs%s%s r%d r%d r%d\n",
735 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
736
737 t0 = tcg_temp_new();
738
739 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
740 tcg_gen_andi_tl(t0, t0, 31);
741
742 if (s)
743 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
744 else {
745 if (t)
746 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
747 else
748 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
749 }
750}
751
752static void dec_bit(DisasContext *dc)
753{
0063ebd6 754 CPUState *cs = CPU(dc->cpu);
09b9f113 755 TCGv t0;
4acb54ba 756 unsigned int op;
0063ebd6 757 int mem_index = cpu_mmu_index(&dc->cpu->env);
4acb54ba 758
ace2e4da 759 op = dc->ir & ((1 << 9) - 1);
4acb54ba
EI
760 switch (op) {
761 case 0x21:
762 /* src. */
763 t0 = tcg_temp_new();
764
765 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
09b9f113
EI
766 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
767 write_carry(dc, cpu_R[dc->ra]);
4acb54ba 768 if (dc->rd) {
4acb54ba 769 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
09b9f113 770 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
4acb54ba 771 }
4acb54ba
EI
772 tcg_temp_free(t0);
773 break;
774
775 case 0x1:
776 case 0x41:
777 /* srl. */
4acb54ba
EI
778 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
779
bb3cb951
EI
780 /* Update carry. Note that write carry only looks at the LSB. */
781 write_carry(dc, cpu_R[dc->ra]);
4acb54ba
EI
782 if (dc->rd) {
783 if (op == 0x41)
784 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
785 else
786 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
787 }
788 break;
789 case 0x60:
790 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
791 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
792 break;
793 case 0x61:
794 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
795 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
796 break;
797 case 0x64:
f062a3c7
EI
798 case 0x66:
799 case 0x74:
800 case 0x76:
4acb54ba
EI
801 /* wdc. */
802 LOG_DIS("wdc r%d\n", dc->ra);
1567a005
EI
803 if ((dc->tb_flags & MSR_EE_FLAG)
804 && mem_index == MMU_USER_IDX) {
805 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
806 t_gen_raise_exception(dc, EXCP_HW_EXCP);
807 return;
808 }
4acb54ba
EI
809 break;
810 case 0x68:
811 /* wic. */
812 LOG_DIS("wic r%d\n", dc->ra);
1567a005
EI
813 if ((dc->tb_flags & MSR_EE_FLAG)
814 && mem_index == MMU_USER_IDX) {
815 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
816 t_gen_raise_exception(dc, EXCP_HW_EXCP);
817 return;
818 }
4acb54ba 819 break;
48b5e96f
EI
820 case 0xe0:
821 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6
AF
822 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
823 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
48b5e96f
EI
824 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
825 t_gen_raise_exception(dc, EXCP_HW_EXCP);
826 }
0063ebd6 827 if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
48b5e96f
EI
828 gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
829 }
830 break;
ace2e4da
PC
831 case 0x1e0:
832 /* swapb */
833 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
834 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
835 break;
b8c6a5d9 836 case 0x1e2:
ace2e4da
PC
837 /*swaph */
838 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
839 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
840 break;
4acb54ba 841 default:
a47dddd7
AF
842 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
843 dc->pc, op, dc->rd, dc->ra, dc->rb);
4acb54ba
EI
844 break;
845 }
846}
847
848static inline void sync_jmpstate(DisasContext *dc)
849{
844bab60
EI
850 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
851 if (dc->jmp == JMP_DIRECT) {
852 tcg_gen_movi_tl(env_btaken, 1);
853 }
23979dc5
EI
854 dc->jmp = JMP_INDIRECT;
855 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
4acb54ba
EI
856 }
857}
858
859static void dec_imm(DisasContext *dc)
860{
861 LOG_DIS("imm %x\n", dc->imm << 16);
862 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
863 dc->tb_flags |= IMM_FLAG;
864 dc->clear_imm = 0;
865}
866
4acb54ba
EI
867static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
868{
869 unsigned int extimm = dc->tb_flags & IMM_FLAG;
5818dee5
EI
870 /* Should be set to one if r1 is used by loadstores. */
871 int stackprot = 0;
872
873 /* All load/stores use ra. */
874 if (dc->ra == 1) {
875 stackprot = 1;
876 }
4acb54ba 877
9ef55357 878 /* Treat the common cases first. */
4acb54ba 879 if (!dc->type_b) {
4b5ef0b5
EI
880 /* If any of the regs is r0, return a ptr to the other. */
881 if (dc->ra == 0) {
882 return &cpu_R[dc->rb];
883 } else if (dc->rb == 0) {
884 return &cpu_R[dc->ra];
885 }
886
5818dee5
EI
887 if (dc->rb == 1) {
888 stackprot = 1;
889 }
890
4acb54ba
EI
891 *t = tcg_temp_new();
892 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
5818dee5
EI
893
894 if (stackprot) {
64254eba 895 gen_helper_stackprot(cpu_env, *t);
5818dee5 896 }
4acb54ba
EI
897 return t;
898 }
899 /* Immediate. */
900 if (!extimm) {
901 if (dc->imm == 0) {
902 return &cpu_R[dc->ra];
903 }
904 *t = tcg_temp_new();
905 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
906 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
907 } else {
908 *t = tcg_temp_new();
909 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
910 }
911
5818dee5 912 if (stackprot) {
64254eba 913 gen_helper_stackprot(cpu_env, *t);
5818dee5 914 }
4acb54ba
EI
915 return t;
916}
917
918static void dec_load(DisasContext *dc)
919{
47acdd63 920 TCGv t, v, *addr;
8cc9b43f 921 unsigned int size, rev = 0, ex = 0;
47acdd63 922 TCGMemOp mop;
4acb54ba 923
47acdd63
RH
924 mop = dc->opcode & 3;
925 size = 1 << mop;
9f8beb66
EI
926 if (!dc->type_b) {
927 rev = (dc->ir >> 9) & 1;
8cc9b43f 928 ex = (dc->ir >> 10) & 1;
9f8beb66 929 }
47acdd63
RH
930 mop |= MO_TE;
931 if (rev) {
932 mop ^= MO_BSWAP;
933 }
9f8beb66 934
0187688f 935 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
0063ebd6 936 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
0187688f
EI
937 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
938 t_gen_raise_exception(dc, EXCP_HW_EXCP);
939 return;
940 }
4acb54ba 941
8cc9b43f
PC
942 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
943 ex ? "x" : "");
9f8beb66 944
4acb54ba
EI
945 t_sync_flags(dc);
946 addr = compute_ldst_addr(dc, &t);
947
9f8beb66
EI
948 /*
949 * When doing reverse accesses we need to do two things.
950 *
4ff9786c 951 * 1. Reverse the address wrt endianness.
9f8beb66
EI
952 * 2. Byteswap the data lanes on the way back into the CPU core.
953 */
954 if (rev && size != 4) {
955 /* Endian reverse the address. t is addr. */
956 switch (size) {
957 case 1:
958 {
959 /* 00 -> 11
960 01 -> 10
961 10 -> 10
962 11 -> 00 */
963 TCGv low = tcg_temp_new();
964
965 /* Force addr into the temp. */
966 if (addr != &t) {
967 t = tcg_temp_new();
968 tcg_gen_mov_tl(t, *addr);
969 addr = &t;
970 }
971
972 tcg_gen_andi_tl(low, t, 3);
973 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
974 tcg_gen_andi_tl(t, t, ~3);
975 tcg_gen_or_tl(t, t, low);
9f8beb66
EI
976 tcg_gen_mov_tl(env_imm, t);
977 tcg_temp_free(low);
978 break;
979 }
980
981 case 2:
982 /* 00 -> 10
983 10 -> 00. */
984 /* Force addr into the temp. */
985 if (addr != &t) {
986 t = tcg_temp_new();
987 tcg_gen_xori_tl(t, *addr, 2);
988 addr = &t;
989 } else {
990 tcg_gen_xori_tl(t, t, 2);
991 }
992 break;
993 default:
0063ebd6 994 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
9f8beb66
EI
995 break;
996 }
997 }
998
8cc9b43f
PC
999 /* lwx does not throw unaligned access errors, so force alignment */
1000 if (ex) {
1001 /* Force addr into the temp. */
1002 if (addr != &t) {
1003 t = tcg_temp_new();
1004 tcg_gen_mov_tl(t, *addr);
1005 addr = &t;
1006 }
1007 tcg_gen_andi_tl(t, t, ~3);
1008 }
1009
4acb54ba
EI
1010 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1011 sync_jmpstate(dc);
968a40f6
EI
1012
1013 /* Verify alignment if needed. */
47acdd63
RH
1014 /*
1015 * Microblaze gives MMU faults priority over faults due to
1016 * unaligned addresses. That's why we speculatively do the load
1017 * into v. If the load succeeds, we verify alignment of the
1018 * address and if that succeeds we write into the destination reg.
1019 */
1020 v = tcg_temp_new();
0063ebd6 1021 tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
a12f6507 1022
0063ebd6 1023 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
a12f6507 1024 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
64254eba 1025 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
3aa80988 1026 tcg_const_tl(0), tcg_const_tl(size - 1));
4acb54ba
EI
1027 }
1028
47acdd63
RH
1029 if (ex) {
1030 tcg_gen_mov_tl(env_res_addr, *addr);
1031 tcg_gen_mov_tl(env_res_val, v);
1032 }
1033 if (dc->rd) {
1034 tcg_gen_mov_tl(cpu_R[dc->rd], v);
1035 }
1036 tcg_temp_free(v);
1037
8cc9b43f
PC
1038 if (ex) { /* lwx */
1039 /* no support for for AXI exclusive so always clear C */
1040 write_carryi(dc, 0);
8cc9b43f
PC
1041 }
1042
4acb54ba
EI
1043 if (addr == &t)
1044 tcg_temp_free(t);
1045}
1046
4acb54ba
EI
1047static void dec_store(DisasContext *dc)
1048{
4a536270 1049 TCGv t, *addr, swx_addr;
8cc9b43f
PC
1050 int swx_skip = 0;
1051 unsigned int size, rev = 0, ex = 0;
47acdd63 1052 TCGMemOp mop;
4acb54ba 1053
47acdd63
RH
1054 mop = dc->opcode & 3;
1055 size = 1 << mop;
9f8beb66
EI
1056 if (!dc->type_b) {
1057 rev = (dc->ir >> 9) & 1;
8cc9b43f 1058 ex = (dc->ir >> 10) & 1;
9f8beb66 1059 }
47acdd63
RH
1060 mop |= MO_TE;
1061 if (rev) {
1062 mop ^= MO_BSWAP;
1063 }
4acb54ba 1064
0187688f 1065 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
0063ebd6 1066 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
0187688f
EI
1067 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1068 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1069 return;
1070 }
1071
8cc9b43f
PC
1072 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1073 ex ? "x" : "");
4acb54ba
EI
1074 t_sync_flags(dc);
1075 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1076 sync_jmpstate(dc);
1077 addr = compute_ldst_addr(dc, &t);
968a40f6 1078
083dbf48 1079 swx_addr = tcg_temp_local_new();
8cc9b43f 1080 if (ex) { /* swx */
11a76217 1081 TCGv tval;
8cc9b43f
PC
1082
1083 /* Force addr into the swx_addr. */
1084 tcg_gen_mov_tl(swx_addr, *addr);
1085 addr = &swx_addr;
1086 /* swx does not throw unaligned access errors, so force alignment */
1087 tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
1088
8cc9b43f
PC
1089 write_carryi(dc, 1);
1090 swx_skip = gen_new_label();
4a536270 1091 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
11a76217
EI
1092
1093 /* Compare the value loaded at lwx with current contents of
1094 the reserved location.
1095 FIXME: This only works for system emulation where we can expect
1096 this compare and the following write to be atomic. For user
1097 emulation we need to add atomicity between threads. */
1098 tval = tcg_temp_new();
0063ebd6
AF
1099 tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
1100 MO_TEUL);
11a76217 1101 tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
8cc9b43f 1102 write_carryi(dc, 0);
11a76217 1103 tcg_temp_free(tval);
8cc9b43f
PC
1104 }
1105
9f8beb66
EI
1106 if (rev && size != 4) {
1107 /* Endian reverse the address. t is addr. */
1108 switch (size) {
1109 case 1:
1110 {
1111 /* 00 -> 11
1112 01 -> 10
1113 10 -> 10
1114 11 -> 00 */
1115 TCGv low = tcg_temp_new();
1116
1117 /* Force addr into the temp. */
1118 if (addr != &t) {
1119 t = tcg_temp_new();
1120 tcg_gen_mov_tl(t, *addr);
1121 addr = &t;
1122 }
1123
1124 tcg_gen_andi_tl(low, t, 3);
1125 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1126 tcg_gen_andi_tl(t, t, ~3);
1127 tcg_gen_or_tl(t, t, low);
9f8beb66
EI
1128 tcg_gen_mov_tl(env_imm, t);
1129 tcg_temp_free(low);
1130 break;
1131 }
1132
1133 case 2:
1134 /* 00 -> 10
1135 10 -> 00. */
1136 /* Force addr into the temp. */
1137 if (addr != &t) {
1138 t = tcg_temp_new();
1139 tcg_gen_xori_tl(t, *addr, 2);
1140 addr = &t;
1141 } else {
1142 tcg_gen_xori_tl(t, t, 2);
1143 }
1144 break;
1145 default:
0063ebd6 1146 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
9f8beb66
EI
1147 break;
1148 }
9f8beb66 1149 }
0063ebd6 1150 tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);
a12f6507 1151
968a40f6 1152 /* Verify alignment if needed. */
0063ebd6 1153 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
a12f6507
EI
1154 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1155 /* FIXME: if the alignment is wrong, we should restore the value
4abf79a4 1156 * in memory. One possible way to achieve this is to probe
9f8beb66
EI
1157 * the MMU prior to the memaccess, thay way we could put
1158 * the alignment checks in between the probe and the mem
1159 * access.
a12f6507 1160 */
64254eba 1161 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
3aa80988 1162 tcg_const_tl(1), tcg_const_tl(size - 1));
968a40f6 1163 }
083dbf48 1164
8cc9b43f
PC
1165 if (ex) {
1166 gen_set_label(swx_skip);
8cc9b43f 1167 }
083dbf48 1168 tcg_temp_free(swx_addr);
968a40f6 1169
4acb54ba
EI
1170 if (addr == &t)
1171 tcg_temp_free(t);
1172}
1173
1174static inline void eval_cc(DisasContext *dc, unsigned int cc,
1175 TCGv d, TCGv a, TCGv b)
1176{
4acb54ba
EI
1177 switch (cc) {
1178 case CC_EQ:
b2565c69 1179 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
4acb54ba
EI
1180 break;
1181 case CC_NE:
b2565c69 1182 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
4acb54ba
EI
1183 break;
1184 case CC_LT:
b2565c69 1185 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
4acb54ba
EI
1186 break;
1187 case CC_LE:
b2565c69 1188 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
4acb54ba
EI
1189 break;
1190 case CC_GE:
b2565c69 1191 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
4acb54ba
EI
1192 break;
1193 case CC_GT:
b2565c69 1194 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
4acb54ba
EI
1195 break;
1196 default:
0063ebd6 1197 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
4acb54ba
EI
1198 break;
1199 }
1200}
1201
1202static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1203{
1204 int l1;
1205
1206 l1 = gen_new_label();
1207 /* Conditional jmp. */
1208 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1209 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1210 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1211 gen_set_label(l1);
1212}
1213
1214static void dec_bcc(DisasContext *dc)
1215{
1216 unsigned int cc;
1217 unsigned int dslot;
1218
1219 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1220 dslot = dc->ir & (1 << 25);
1221 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1222
1223 dc->delayed_branch = 1;
1224 if (dslot) {
1225 dc->delayed_branch = 2;
1226 dc->tb_flags |= D_FLAG;
1227 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1228 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1229 }
1230
61204ce8
EI
1231 if (dec_alu_op_b_is_small_imm(dc)) {
1232 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1233
1234 tcg_gen_movi_tl(env_btarget, dc->pc + offset);
844bab60 1235 dc->jmp = JMP_DIRECT_CC;
23979dc5 1236 dc->jmp_pc = dc->pc + offset;
61204ce8 1237 } else {
23979dc5 1238 dc->jmp = JMP_INDIRECT;
61204ce8
EI
1239 tcg_gen_movi_tl(env_btarget, dc->pc);
1240 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1241 }
61204ce8 1242 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
4acb54ba
EI
1243}
1244
1245static void dec_br(DisasContext *dc)
1246{
9f6113c7 1247 unsigned int dslot, link, abs, mbar;
0063ebd6 1248 int mem_index = cpu_mmu_index(&dc->cpu->env);
4acb54ba
EI
1249
1250 dslot = dc->ir & (1 << 20);
1251 abs = dc->ir & (1 << 19);
1252 link = dc->ir & (1 << 18);
9f6113c7
EI
1253
1254 /* Memory barrier. */
1255 mbar = (dc->ir >> 16) & 31;
1256 if (mbar == 2 && dc->imm == 4) {
5d45de97
EI
1257 /* mbar IMM & 16 decodes to sleep. */
1258 if (dc->rd & 16) {
1259 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1260 TCGv_i32 tmp_1 = tcg_const_i32(1);
1261
1262 LOG_DIS("sleep\n");
1263
1264 t_sync_flags(dc);
1265 tcg_gen_st_i32(tmp_1, cpu_env,
1266 -offsetof(MicroBlazeCPU, env)
1267 +offsetof(CPUState, halted));
1268 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
1269 gen_helper_raise_exception(cpu_env, tmp_hlt);
1270 tcg_temp_free_i32(tmp_hlt);
1271 tcg_temp_free_i32(tmp_1);
1272 return;
1273 }
9f6113c7
EI
1274 LOG_DIS("mbar %d\n", dc->rd);
1275 /* Break the TB. */
1276 dc->cpustate_changed = 1;
1277 return;
1278 }
1279
4acb54ba
EI
1280 LOG_DIS("br%s%s%s%s imm=%x\n",
1281 abs ? "a" : "", link ? "l" : "",
1282 dc->type_b ? "i" : "", dslot ? "d" : "",
1283 dc->imm);
1284
1285 dc->delayed_branch = 1;
1286 if (dslot) {
1287 dc->delayed_branch = 2;
1288 dc->tb_flags |= D_FLAG;
1289 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1290 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1291 }
1292 if (link && dc->rd)
1293 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
1294
1295 dc->jmp = JMP_INDIRECT;
1296 if (abs) {
1297 tcg_gen_movi_tl(env_btaken, 1);
1298 tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
ff21f70a
EI
1299 if (link && !dslot) {
1300 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1301 t_gen_raise_exception(dc, EXCP_BREAK);
1302 if (dc->imm == 0) {
1303 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
1304 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1305 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1306 return;
1307 }
1308
1309 t_gen_raise_exception(dc, EXCP_DEBUG);
1310 }
1311 }
4acb54ba 1312 } else {
61204ce8
EI
1313 if (dec_alu_op_b_is_small_imm(dc)) {
1314 dc->jmp = JMP_DIRECT;
1315 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1316 } else {
4acb54ba
EI
1317 tcg_gen_movi_tl(env_btaken, 1);
1318 tcg_gen_movi_tl(env_btarget, dc->pc);
1319 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
4acb54ba
EI
1320 }
1321 }
1322}
1323
1324static inline void do_rti(DisasContext *dc)
1325{
1326 TCGv t0, t1;
1327 t0 = tcg_temp_new();
1328 t1 = tcg_temp_new();
1329 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1330 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1331 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1332
1333 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1334 tcg_gen_or_tl(t1, t1, t0);
1335 msr_write(dc, t1);
1336 tcg_temp_free(t1);
1337 tcg_temp_free(t0);
1338 dc->tb_flags &= ~DRTI_FLAG;
1339}
1340
1341static inline void do_rtb(DisasContext *dc)
1342{
1343 TCGv t0, t1;
1344 t0 = tcg_temp_new();
1345 t1 = tcg_temp_new();
1346 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1347 tcg_gen_shri_tl(t0, t1, 1);
1348 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1349
1350 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1351 tcg_gen_or_tl(t1, t1, t0);
1352 msr_write(dc, t1);
1353 tcg_temp_free(t1);
1354 tcg_temp_free(t0);
1355 dc->tb_flags &= ~DRTB_FLAG;
1356}
1357
1358static inline void do_rte(DisasContext *dc)
1359{
1360 TCGv t0, t1;
1361 t0 = tcg_temp_new();
1362 t1 = tcg_temp_new();
1363
1364 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1365 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1366 tcg_gen_shri_tl(t0, t1, 1);
1367 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1368
1369 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1370 tcg_gen_or_tl(t1, t1, t0);
1371 msr_write(dc, t1);
1372 tcg_temp_free(t1);
1373 tcg_temp_free(t0);
1374 dc->tb_flags &= ~DRTE_FLAG;
1375}
1376
1377static void dec_rts(DisasContext *dc)
1378{
1379 unsigned int b_bit, i_bit, e_bit;
0063ebd6 1380 int mem_index = cpu_mmu_index(&dc->cpu->env);
4acb54ba
EI
1381
1382 i_bit = dc->ir & (1 << 21);
1383 b_bit = dc->ir & (1 << 22);
1384 e_bit = dc->ir & (1 << 23);
1385
1386 dc->delayed_branch = 2;
1387 dc->tb_flags |= D_FLAG;
1388 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
68cee38a 1389 cpu_env, offsetof(CPUMBState, bimm));
4acb54ba
EI
1390
1391 if (i_bit) {
1392 LOG_DIS("rtid ir=%x\n", dc->ir);
1567a005
EI
1393 if ((dc->tb_flags & MSR_EE_FLAG)
1394 && mem_index == MMU_USER_IDX) {
1395 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1396 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1397 }
4acb54ba
EI
1398 dc->tb_flags |= DRTI_FLAG;
1399 } else if (b_bit) {
1400 LOG_DIS("rtbd ir=%x\n", dc->ir);
1567a005
EI
1401 if ((dc->tb_flags & MSR_EE_FLAG)
1402 && mem_index == MMU_USER_IDX) {
1403 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1404 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1405 }
4acb54ba
EI
1406 dc->tb_flags |= DRTB_FLAG;
1407 } else if (e_bit) {
1408 LOG_DIS("rted ir=%x\n", dc->ir);
1567a005
EI
1409 if ((dc->tb_flags & MSR_EE_FLAG)
1410 && mem_index == MMU_USER_IDX) {
1411 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1412 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1413 }
4acb54ba
EI
1414 dc->tb_flags |= DRTE_FLAG;
1415 } else
1416 LOG_DIS("rts ir=%x\n", dc->ir);
1417
23979dc5 1418 dc->jmp = JMP_INDIRECT;
4acb54ba
EI
1419 tcg_gen_movi_tl(env_btaken, 1);
1420 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
1421}
1422
97694c57
EI
1423static int dec_check_fpuv2(DisasContext *dc)
1424{
1425 int r;
1426
0063ebd6 1427 r = dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU2_MASK;
97694c57
EI
1428
1429 if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
1430 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1431 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1432 }
1433 return r;
1434}
1435
1567a005
EI
1436static void dec_fpu(DisasContext *dc)
1437{
97694c57
EI
1438 unsigned int fpu_insn;
1439
1567a005 1440 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6
AF
1441 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1442 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU_MASK))) {
97694c57 1443 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1567a005
EI
1444 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1445 return;
1446 }
1447
97694c57
EI
1448 fpu_insn = (dc->ir >> 7) & 7;
1449
1450 switch (fpu_insn) {
1451 case 0:
64254eba
BS
1452 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1453 cpu_R[dc->rb]);
97694c57
EI
1454 break;
1455
1456 case 1:
64254eba
BS
1457 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1458 cpu_R[dc->rb]);
97694c57
EI
1459 break;
1460
1461 case 2:
64254eba
BS
1462 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1463 cpu_R[dc->rb]);
97694c57
EI
1464 break;
1465
1466 case 3:
64254eba
BS
1467 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1468 cpu_R[dc->rb]);
97694c57
EI
1469 break;
1470
1471 case 4:
1472 switch ((dc->ir >> 4) & 7) {
1473 case 0:
64254eba 1474 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
97694c57
EI
1475 cpu_R[dc->ra], cpu_R[dc->rb]);
1476 break;
1477 case 1:
64254eba 1478 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
97694c57
EI
1479 cpu_R[dc->ra], cpu_R[dc->rb]);
1480 break;
1481 case 2:
64254eba 1482 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
97694c57
EI
1483 cpu_R[dc->ra], cpu_R[dc->rb]);
1484 break;
1485 case 3:
64254eba 1486 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
97694c57
EI
1487 cpu_R[dc->ra], cpu_R[dc->rb]);
1488 break;
1489 case 4:
64254eba 1490 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
97694c57
EI
1491 cpu_R[dc->ra], cpu_R[dc->rb]);
1492 break;
1493 case 5:
64254eba 1494 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
97694c57
EI
1495 cpu_R[dc->ra], cpu_R[dc->rb]);
1496 break;
1497 case 6:
64254eba 1498 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
97694c57
EI
1499 cpu_R[dc->ra], cpu_R[dc->rb]);
1500 break;
1501 default:
71547a3b
BS
1502 qemu_log_mask(LOG_UNIMP,
1503 "unimplemented fcmp fpu_insn=%x pc=%x"
1504 " opc=%x\n",
1505 fpu_insn, dc->pc, dc->opcode);
97694c57
EI
1506 dc->abort_at_next_insn = 1;
1507 break;
1508 }
1509 break;
1510
1511 case 5:
1512 if (!dec_check_fpuv2(dc)) {
1513 return;
1514 }
64254eba 1515 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1516 break;
1517
1518 case 6:
1519 if (!dec_check_fpuv2(dc)) {
1520 return;
1521 }
64254eba 1522 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1523 break;
1524
1525 case 7:
1526 if (!dec_check_fpuv2(dc)) {
1527 return;
1528 }
64254eba 1529 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
97694c57
EI
1530 break;
1531
1532 default:
71547a3b
BS
1533 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1534 " opc=%x\n",
1535 fpu_insn, dc->pc, dc->opcode);
97694c57
EI
1536 dc->abort_at_next_insn = 1;
1537 break;
1538 }
1567a005
EI
1539}
1540
4acb54ba
EI
1541static void dec_null(DisasContext *dc)
1542{
02b33596 1543 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6 1544 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
02b33596
EI
1545 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1546 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1547 return;
1548 }
4acb54ba
EI
1549 qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1550 dc->abort_at_next_insn = 1;
1551}
1552
6d76d23e
EI
1553/* Insns connected to FSL or AXI stream attached devices. */
1554static void dec_stream(DisasContext *dc)
1555{
0063ebd6 1556 int mem_index = cpu_mmu_index(&dc->cpu->env);
6d76d23e
EI
1557 TCGv_i32 t_id, t_ctrl;
1558 int ctrl;
1559
1560 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1561 dc->type_b ? "" : "d", dc->imm);
1562
1563 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1564 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1565 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1566 return;
1567 }
1568
1569 t_id = tcg_temp_new();
1570 if (dc->type_b) {
1571 tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1572 ctrl = dc->imm >> 10;
1573 } else {
1574 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1575 ctrl = dc->imm >> 5;
1576 }
1577
1578 t_ctrl = tcg_const_tl(ctrl);
1579
1580 if (dc->rd == 0) {
1581 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1582 } else {
1583 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1584 }
1585 tcg_temp_free(t_id);
1586 tcg_temp_free(t_ctrl);
1587}
1588
4acb54ba
EI
1589static struct decoder_info {
1590 struct {
1591 uint32_t bits;
1592 uint32_t mask;
1593 };
1594 void (*dec)(DisasContext *dc);
1595} decinfo[] = {
1596 {DEC_ADD, dec_add},
1597 {DEC_SUB, dec_sub},
1598 {DEC_AND, dec_and},
1599 {DEC_XOR, dec_xor},
1600 {DEC_OR, dec_or},
1601 {DEC_BIT, dec_bit},
1602 {DEC_BARREL, dec_barrel},
1603 {DEC_LD, dec_load},
1604 {DEC_ST, dec_store},
1605 {DEC_IMM, dec_imm},
1606 {DEC_BR, dec_br},
1607 {DEC_BCC, dec_bcc},
1608 {DEC_RTS, dec_rts},
1567a005 1609 {DEC_FPU, dec_fpu},
4acb54ba
EI
1610 {DEC_MUL, dec_mul},
1611 {DEC_DIV, dec_div},
1612 {DEC_MSR, dec_msr},
6d76d23e 1613 {DEC_STREAM, dec_stream},
4acb54ba
EI
1614 {{0, 0}, dec_null}
1615};
1616
64254eba 1617static inline void decode(DisasContext *dc, uint32_t ir)
4acb54ba 1618{
4acb54ba
EI
1619 int i;
1620
fdefe51c 1621 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4acb54ba 1622 tcg_gen_debug_insn_start(dc->pc);
fdefe51c 1623 }
4acb54ba 1624
64254eba 1625 dc->ir = ir;
4acb54ba
EI
1626 LOG_DIS("%8.8x\t", dc->ir);
1627
1628 if (dc->ir)
1629 dc->nr_nops = 0;
1630 else {
1567a005 1631 if ((dc->tb_flags & MSR_EE_FLAG)
0063ebd6
AF
1632 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1633 && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1567a005
EI
1634 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1635 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1636 return;
1637 }
1638
4acb54ba
EI
1639 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1640 dc->nr_nops++;
a47dddd7 1641 if (dc->nr_nops > 4) {
0063ebd6 1642 cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
a47dddd7 1643 }
4acb54ba
EI
1644 }
1645 /* bit 2 seems to indicate insn type. */
1646 dc->type_b = ir & (1 << 29);
1647
1648 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1649 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1650 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1651 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1652 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1653
1654 /* Large switch for all insns. */
1655 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1656 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1657 decinfo[i].dec(dc);
1658 break;
1659 }
1660 }
1661}
1662
68cee38a 1663static void check_breakpoint(CPUMBState *env, DisasContext *dc)
4acb54ba 1664{
f0c3c505 1665 CPUState *cs = CPU(mb_env_get_cpu(env));
4acb54ba
EI
1666 CPUBreakpoint *bp;
1667
f0c3c505
AF
1668 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
1669 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
4acb54ba
EI
1670 if (bp->pc == dc->pc) {
1671 t_gen_raise_exception(dc, EXCP_DEBUG);
1672 dc->is_jmp = DISAS_UPDATE;
1673 }
1674 }
1675 }
1676}
1677
1678/* generate intermediate code for basic block 'tb'. */
fd327f48 1679static inline void
4a274212
AF
1680gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
1681 bool search_pc)
4acb54ba 1682{
ed2803da 1683 CPUState *cs = CPU(cpu);
4a274212 1684 CPUMBState *env = &cpu->env;
4acb54ba
EI
1685 uint16_t *gen_opc_end;
1686 uint32_t pc_start;
1687 int j, lj;
1688 struct DisasContext ctx;
1689 struct DisasContext *dc = &ctx;
1690 uint32_t next_page_start, org_flags;
1691 target_ulong npc;
1692 int num_insns;
1693 int max_insns;
1694
4acb54ba 1695 pc_start = tb->pc;
0063ebd6 1696 dc->cpu = cpu;
4acb54ba
EI
1697 dc->tb = tb;
1698 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1699
92414b31 1700 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4acb54ba
EI
1701
1702 dc->is_jmp = DISAS_NEXT;
1703 dc->jmp = 0;
1704 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
23979dc5
EI
1705 if (dc->delayed_branch) {
1706 dc->jmp = JMP_INDIRECT;
1707 }
4acb54ba 1708 dc->pc = pc_start;
ed2803da 1709 dc->singlestep_enabled = cs->singlestep_enabled;
4acb54ba
EI
1710 dc->cpustate_changed = 0;
1711 dc->abort_at_next_insn = 0;
1712 dc->nr_nops = 0;
1713
a47dddd7
AF
1714 if (pc_start & 3) {
1715 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1716 }
4acb54ba
EI
1717
1718 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1719#if !SIM_COMPAT
1720 qemu_log("--------------\n");
a0762859 1721 log_cpu_state(CPU(cpu), 0);
4acb54ba
EI
1722#endif
1723 }
1724
1725 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1726 lj = -1;
1727 num_insns = 0;
1728 max_insns = tb->cflags & CF_COUNT_MASK;
1729 if (max_insns == 0)
1730 max_insns = CF_COUNT_MASK;
1731
806f352d 1732 gen_tb_start();
4acb54ba
EI
1733 do
1734 {
1735#if SIM_COMPAT
1736 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1737 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1738 gen_helper_debug();
1739 }
1740#endif
1741 check_breakpoint(env, dc);
1742
1743 if (search_pc) {
92414b31 1744 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4acb54ba
EI
1745 if (lj < j) {
1746 lj++;
1747 while (lj < j)
ab1103de 1748 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4acb54ba 1749 }
25983cad 1750 tcg_ctx.gen_opc_pc[lj] = dc->pc;
ab1103de 1751 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 1752 tcg_ctx.gen_opc_icount[lj] = num_insns;
4acb54ba
EI
1753 }
1754
1755 /* Pretty disas. */
1756 LOG_DIS("%8.8x:\t", dc->pc);
1757
1758 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1759 gen_io_start();
1760
1761 dc->clear_imm = 1;
64254eba 1762 decode(dc, cpu_ldl_code(env, dc->pc));
4acb54ba
EI
1763 if (dc->clear_imm)
1764 dc->tb_flags &= ~IMM_FLAG;
4acb54ba
EI
1765 dc->pc += 4;
1766 num_insns++;
1767
1768 if (dc->delayed_branch) {
1769 dc->delayed_branch--;
1770 if (!dc->delayed_branch) {
1771 if (dc->tb_flags & DRTI_FLAG)
1772 do_rti(dc);
1773 if (dc->tb_flags & DRTB_FLAG)
1774 do_rtb(dc);
1775 if (dc->tb_flags & DRTE_FLAG)
1776 do_rte(dc);
1777 /* Clear the delay slot flag. */
1778 dc->tb_flags &= ~D_FLAG;
1779 /* If it is a direct jump, try direct chaining. */
23979dc5 1780 if (dc->jmp == JMP_INDIRECT) {
4acb54ba
EI
1781 eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1782 dc->is_jmp = DISAS_JUMP;
23979dc5 1783 } else if (dc->jmp == JMP_DIRECT) {
844bab60
EI
1784 t_sync_flags(dc);
1785 gen_goto_tb(dc, 0, dc->jmp_pc);
1786 dc->is_jmp = DISAS_TB_JUMP;
1787 } else if (dc->jmp == JMP_DIRECT_CC) {
23979dc5
EI
1788 int l1;
1789
1790 t_sync_flags(dc);
1791 l1 = gen_new_label();
1792 /* Conditional jmp. */
1793 tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1794 gen_goto_tb(dc, 1, dc->pc);
1795 gen_set_label(l1);
1796 gen_goto_tb(dc, 0, dc->jmp_pc);
1797
1798 dc->is_jmp = DISAS_TB_JUMP;
4acb54ba
EI
1799 }
1800 break;
1801 }
1802 }
ed2803da 1803 if (cs->singlestep_enabled) {
4acb54ba 1804 break;
ed2803da 1805 }
4acb54ba 1806 } while (!dc->is_jmp && !dc->cpustate_changed
efd7f486 1807 && tcg_ctx.gen_opc_ptr < gen_opc_end
4acb54ba
EI
1808 && !singlestep
1809 && (dc->pc < next_page_start)
1810 && num_insns < max_insns);
1811
1812 npc = dc->pc;
844bab60 1813 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
4acb54ba
EI
1814 if (dc->tb_flags & D_FLAG) {
1815 dc->is_jmp = DISAS_UPDATE;
1816 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1817 sync_jmpstate(dc);
1818 } else
1819 npc = dc->jmp_pc;
1820 }
1821
1822 if (tb->cflags & CF_LAST_IO)
1823 gen_io_end();
1824 /* Force an update if the per-tb cpu state has changed. */
1825 if (dc->is_jmp == DISAS_NEXT
1826 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1827 dc->is_jmp = DISAS_UPDATE;
1828 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1829 }
1830 t_sync_flags(dc);
1831
ed2803da 1832 if (unlikely(cs->singlestep_enabled)) {
6c5f738d
EI
1833 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1834
1835 if (dc->is_jmp != DISAS_JUMP) {
4acb54ba 1836 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
6c5f738d 1837 }
64254eba 1838 gen_helper_raise_exception(cpu_env, tmp);
6c5f738d 1839 tcg_temp_free_i32(tmp);
4acb54ba
EI
1840 } else {
1841 switch(dc->is_jmp) {
1842 case DISAS_NEXT:
1843 gen_goto_tb(dc, 1, npc);
1844 break;
1845 default:
1846 case DISAS_JUMP:
1847 case DISAS_UPDATE:
1848 /* indicate that the hash table must be used
1849 to find the next TB */
1850 tcg_gen_exit_tb(0);
1851 break;
1852 case DISAS_TB_JUMP:
1853 /* nothing more to generate */
1854 break;
1855 }
1856 }
806f352d 1857 gen_tb_end(tb, num_insns);
efd7f486 1858 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4acb54ba 1859 if (search_pc) {
92414b31 1860 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4acb54ba
EI
1861 lj++;
1862 while (lj <= j)
ab1103de 1863 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4acb54ba
EI
1864 } else {
1865 tb->size = dc->pc - pc_start;
1866 tb->icount = num_insns;
1867 }
1868
1869#ifdef DEBUG_DISAS
1870#if !SIM_COMPAT
1871 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1872 qemu_log("\n");
1873#if DISAS_GNU
f4359b9f 1874 log_target_disas(env, pc_start, dc->pc - pc_start, 0);
4acb54ba 1875#endif
e6aa0f11 1876 qemu_log("\nisize=%d osize=%td\n",
92414b31
EV
1877 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
1878 tcg_ctx.gen_opc_buf);
4acb54ba
EI
1879 }
1880#endif
1881#endif
1882 assert(!dc->abort_at_next_insn);
1883}
1884
68cee38a 1885void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
4acb54ba 1886{
4a274212 1887 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
4acb54ba
EI
1888}
1889
68cee38a 1890void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
4acb54ba 1891{
4a274212 1892 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
4acb54ba
EI
1893}
1894
878096ee
AF
1895void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1896 int flags)
4acb54ba 1897{
878096ee
AF
1898 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1899 CPUMBState *env = &cpu->env;
4acb54ba
EI
1900 int i;
1901
1902 if (!env || !f)
1903 return;
1904
1905 cpu_fprintf(f, "IN: PC=%x %s\n",
1906 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
97694c57 1907 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
4c24aa0a 1908 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
97694c57 1909 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
17c52a43 1910 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
4acb54ba
EI
1911 env->btaken, env->btarget,
1912 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
17c52a43
EI
1913 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1914 (env->sregs[SR_MSR] & MSR_EIP),
1915 (env->sregs[SR_MSR] & MSR_IE));
1916
4acb54ba
EI
1917 for (i = 0; i < 32; i++) {
1918 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1919 if ((i + 1) % 4 == 0)
1920 cpu_fprintf(f, "\n");
1921 }
1922 cpu_fprintf(f, "\n\n");
1923}
1924
b33ab1f7 1925MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
4acb54ba 1926{
b77f98ca 1927 MicroBlazeCPU *cpu;
4acb54ba 1928
b77f98ca 1929 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
4acb54ba 1930
746b03b2 1931 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
4acb54ba 1932
cd0c24f9
AF
1933 return cpu;
1934}
4acb54ba 1935
cd0c24f9
AF
1936void mb_tcg_init(void)
1937{
1938 int i;
4acb54ba
EI
1939
1940 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1941
1942 env_debug = tcg_global_mem_new(TCG_AREG0,
68cee38a 1943 offsetof(CPUMBState, debug),
4acb54ba
EI
1944 "debug0");
1945 env_iflags = tcg_global_mem_new(TCG_AREG0,
68cee38a 1946 offsetof(CPUMBState, iflags),
4acb54ba
EI
1947 "iflags");
1948 env_imm = tcg_global_mem_new(TCG_AREG0,
68cee38a 1949 offsetof(CPUMBState, imm),
4acb54ba
EI
1950 "imm");
1951 env_btarget = tcg_global_mem_new(TCG_AREG0,
68cee38a 1952 offsetof(CPUMBState, btarget),
4acb54ba
EI
1953 "btarget");
1954 env_btaken = tcg_global_mem_new(TCG_AREG0,
68cee38a 1955 offsetof(CPUMBState, btaken),
4acb54ba 1956 "btaken");
4a536270
EI
1957 env_res_addr = tcg_global_mem_new(TCG_AREG0,
1958 offsetof(CPUMBState, res_addr),
1959 "res_addr");
11a76217
EI
1960 env_res_val = tcg_global_mem_new(TCG_AREG0,
1961 offsetof(CPUMBState, res_val),
1962 "res_val");
4acb54ba
EI
1963 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1964 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
68cee38a 1965 offsetof(CPUMBState, regs[i]),
4acb54ba
EI
1966 regnames[i]);
1967 }
1968 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1969 cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
68cee38a 1970 offsetof(CPUMBState, sregs[i]),
4acb54ba
EI
1971 special_regnames[i]);
1972 }
4acb54ba
EI
1973}
1974
68cee38a 1975void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
4acb54ba 1976{
25983cad 1977 env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
4acb54ba 1978}