]> git.proxmox.com Git - mirror_qemu.git/blame - target/microblaze/translate.c
target/microblaze: Fix width of MSR
[mirror_qemu.git] / target / microblaze / translate.c
CommitLineData
4acb54ba
EI
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
dadc1064 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
4acb54ba
EI
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4acb54ba
EI
19 */
20
8fd9dece 21#include "qemu/osdep.h"
4acb54ba 22#include "cpu.h"
76cad711 23#include "disas/disas.h"
63c91552 24#include "exec/exec-all.h"
dcb32f1d 25#include "tcg/tcg-op.h"
2ef6175a 26#include "exec/helper-proto.h"
4acb54ba 27#include "microblaze-decode.h"
f08b6170 28#include "exec/cpu_ldst.h"
2ef6175a 29#include "exec/helper-gen.h"
77fc6f5e 30#include "exec/translator.h"
90c84c56 31#include "qemu/qemu-print.h"
4acb54ba 32
a7e30d84 33#include "trace-tcg.h"
508127e2 34#include "exec/log.h"
a7e30d84
LV
35
36
4acb54ba
EI
#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
/* Trace disassembly of translated instructions when CPU_LOG_TB_IN_ASM is on. */
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

/* Extract bits [start, end] (inclusive, little-endian numbering) of src. */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
50
77fc6f5e
LV
51/* is_jmp field values */
52#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
53#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
54#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
55
cfeea807
EI
56static TCGv_i32 env_debug;
57static TCGv_i32 cpu_R[32];
0f96e96b 58static TCGv_i32 cpu_pc;
3e0e16ae 59static TCGv_i32 cpu_msr;
aa28e6d4
RH
60static TCGv_i64 cpu_ear;
61static TCGv_i64 cpu_esr;
62static TCGv_i64 cpu_fsr;
63static TCGv_i64 cpu_btr;
64static TCGv_i64 cpu_edr;
cfeea807
EI
65static TCGv_i32 env_imm;
66static TCGv_i32 env_btaken;
0f96e96b 67static TCGv_i32 cpu_btarget;
cfeea807 68static TCGv_i32 env_iflags;
403322ea 69static TCGv env_res_addr;
cfeea807 70static TCGv_i32 env_res_val;
4acb54ba 71
022c62cb 72#include "exec/gen-icount.h"
4acb54ba
EI
73
74/* This is the state at translation time. */
75typedef struct DisasContext {
0063ebd6 76 MicroBlazeCPU *cpu;
cfeea807 77 uint32_t pc;
4acb54ba
EI
78
79 /* Decoder. */
80 int type_b;
81 uint32_t ir;
82 uint8_t opcode;
83 uint8_t rd, ra, rb;
84 uint16_t imm;
85
86 unsigned int cpustate_changed;
87 unsigned int delayed_branch;
88 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
89 unsigned int clear_imm;
90 int is_jmp;
91
844bab60
EI
92#define JMP_NOJMP 0
93#define JMP_DIRECT 1
94#define JMP_DIRECT_CC 2
95#define JMP_INDIRECT 3
4acb54ba
EI
96 unsigned int jmp;
97 uint32_t jmp_pc;
98
99 int abort_at_next_insn;
4acb54ba
EI
100 struct TranslationBlock *tb;
101 int singlestep_enabled;
102} DisasContext;
103
/* Printable names of the 32 general purpose registers, for tracing. */
static const char *regnames[] =
{
    "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
    "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
111
4acb54ba
EI
112static inline void t_sync_flags(DisasContext *dc)
113{
4abf79a4 114 /* Synch the tb dependent flags between translator and runtime. */
4acb54ba 115 if (dc->tb_flags != dc->synced_flags) {
cfeea807 116 tcg_gen_movi_i32(env_iflags, dc->tb_flags);
4acb54ba
EI
117 dc->synced_flags = dc->tb_flags;
118 }
119}
120
121static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
122{
123 TCGv_i32 tmp = tcg_const_i32(index);
124
125 t_sync_flags(dc);
0f96e96b 126 tcg_gen_movi_i32(cpu_pc, dc->pc);
64254eba 127 gen_helper_raise_exception(cpu_env, tmp);
4acb54ba
EI
128 tcg_temp_free_i32(tmp);
129 dc->is_jmp = DISAS_UPDATE;
130}
131
90aa39a1
SF
132static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
133{
134#ifndef CONFIG_USER_ONLY
135 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
136#else
137 return true;
138#endif
139}
140
4acb54ba
EI
141static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
142{
90aa39a1 143 if (use_goto_tb(dc, dest)) {
4acb54ba 144 tcg_gen_goto_tb(n);
0f96e96b 145 tcg_gen_movi_i32(cpu_pc, dest);
07ea28b4 146 tcg_gen_exit_tb(dc->tb, n);
4acb54ba 147 } else {
0f96e96b 148 tcg_gen_movi_i32(cpu_pc, dest);
07ea28b4 149 tcg_gen_exit_tb(NULL, 0);
4acb54ba
EI
150 }
151}
152
cfeea807 153static void read_carry(DisasContext *dc, TCGv_i32 d)
ee8b246f 154{
3e0e16ae 155 tcg_gen_shri_i32(d, cpu_msr, 31);
ee8b246f
EI
156}
157
04ec7df7
EI
158/*
159 * write_carry sets the carry bits in MSR based on bit 0 of v.
160 * v[31:1] are ignored.
161 */
cfeea807 162static void write_carry(DisasContext *dc, TCGv_i32 v)
ee8b246f 163{
0a22f8cf 164 /* Deposit bit 0 into MSR_C and the alias MSR_CC. */
3e0e16ae
RH
165 tcg_gen_deposit_i32(cpu_msr, cpu_msr, v, 2, 1);
166 tcg_gen_deposit_i32(cpu_msr, cpu_msr, v, 31, 1);
ee8b246f
EI
167}
168
65ab5eb4 169static void write_carryi(DisasContext *dc, bool carry)
8cc9b43f 170{
cfeea807
EI
171 TCGv_i32 t0 = tcg_temp_new_i32();
172 tcg_gen_movi_i32(t0, carry);
8cc9b43f 173 write_carry(dc, t0);
cfeea807 174 tcg_temp_free_i32(t0);
8cc9b43f
PC
175}
176
9ba8cd45
EI
177/*
178 * Returns true if the insn an illegal operation.
179 * If exceptions are enabled, an exception is raised.
180 */
181static bool trap_illegal(DisasContext *dc, bool cond)
182{
183 if (cond && (dc->tb_flags & MSR_EE_FLAG)
5143fdf3 184 && dc->cpu->cfg.illegal_opcode_exception) {
aa28e6d4 185 tcg_gen_movi_i64(cpu_esr, ESR_EC_ILLEGAL_OP);
9ba8cd45
EI
186 t_gen_raise_exception(dc, EXCP_HW_EXCP);
187 }
188 return cond;
189}
190
bdfc1e88
EI
191/*
192 * Returns true if the insn is illegal in userspace.
193 * If exceptions are enabled, an exception is raised.
194 */
195static bool trap_userspace(DisasContext *dc, bool cond)
196{
197 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
198 bool cond_user = cond && mem_index == MMU_USER_IDX;
199
200 if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
aa28e6d4 201 tcg_gen_movi_i64(cpu_esr, ESR_EC_PRIVINSN);
bdfc1e88
EI
202 t_gen_raise_exception(dc, EXCP_HW_EXCP);
203 }
204 return cond_user;
205}
206
61204ce8
EI
207/* True if ALU operand b is a small immediate that may deserve
208 faster treatment. */
209static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
210{
211 /* Immediate insn without the imm prefix ? */
212 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
213}
214
cfeea807 215static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
4acb54ba
EI
216{
217 if (dc->type_b) {
218 if (dc->tb_flags & IMM_FLAG)
cfeea807 219 tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
4acb54ba 220 else
cfeea807 221 tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
4acb54ba
EI
222 return &env_imm;
223 } else
224 return &cpu_R[dc->rb];
225}
226
227static void dec_add(DisasContext *dc)
228{
229 unsigned int k, c;
cfeea807 230 TCGv_i32 cf;
4acb54ba
EI
231
232 k = dc->opcode & 4;
233 c = dc->opcode & 2;
234
235 LOG_DIS("add%s%s%s r%d r%d r%d\n",
236 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
237 dc->rd, dc->ra, dc->rb);
238
40cbf5b7
EI
239 /* Take care of the easy cases first. */
240 if (k) {
241 /* k - keep carry, no need to update MSR. */
242 /* If rd == r0, it's a nop. */
243 if (dc->rd) {
cfeea807 244 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
40cbf5b7
EI
245
246 if (c) {
247 /* c - Add carry into the result. */
cfeea807 248 cf = tcg_temp_new_i32();
40cbf5b7
EI
249
250 read_carry(dc, cf);
cfeea807
EI
251 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
252 tcg_temp_free_i32(cf);
40cbf5b7
EI
253 }
254 }
255 return;
256 }
257
258 /* From now on, we can assume k is zero. So we need to update MSR. */
259 /* Extract carry. */
cfeea807 260 cf = tcg_temp_new_i32();
40cbf5b7
EI
261 if (c) {
262 read_carry(dc, cf);
263 } else {
cfeea807 264 tcg_gen_movi_i32(cf, 0);
40cbf5b7
EI
265 }
266
267 if (dc->rd) {
cfeea807 268 TCGv_i32 ncf = tcg_temp_new_i32();
5d0bb823 269 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
cfeea807
EI
270 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
271 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
40cbf5b7 272 write_carry(dc, ncf);
cfeea807 273 tcg_temp_free_i32(ncf);
40cbf5b7 274 } else {
5d0bb823 275 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
40cbf5b7 276 write_carry(dc, cf);
4acb54ba 277 }
cfeea807 278 tcg_temp_free_i32(cf);
4acb54ba
EI
279}
280
281static void dec_sub(DisasContext *dc)
282{
283 unsigned int u, cmp, k, c;
cfeea807 284 TCGv_i32 cf, na;
4acb54ba
EI
285
286 u = dc->imm & 2;
287 k = dc->opcode & 4;
288 c = dc->opcode & 2;
289 cmp = (dc->imm & 1) && (!dc->type_b) && k;
290
291 if (cmp) {
292 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
293 if (dc->rd) {
294 if (u)
295 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
296 else
297 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
298 }
e0a42ebc
EI
299 return;
300 }
301
302 LOG_DIS("sub%s%s r%d, r%d r%d\n",
303 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
304
305 /* Take care of the easy cases first. */
306 if (k) {
307 /* k - keep carry, no need to update MSR. */
308 /* If rd == r0, it's a nop. */
309 if (dc->rd) {
cfeea807 310 tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
e0a42ebc
EI
311
312 if (c) {
313 /* c - Add carry into the result. */
cfeea807 314 cf = tcg_temp_new_i32();
e0a42ebc
EI
315
316 read_carry(dc, cf);
cfeea807
EI
317 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
318 tcg_temp_free_i32(cf);
e0a42ebc
EI
319 }
320 }
321 return;
322 }
323
324 /* From now on, we can assume k is zero. So we need to update MSR. */
325 /* Extract carry. And complement a into na. */
cfeea807
EI
326 cf = tcg_temp_new_i32();
327 na = tcg_temp_new_i32();
e0a42ebc
EI
328 if (c) {
329 read_carry(dc, cf);
330 } else {
cfeea807 331 tcg_gen_movi_i32(cf, 1);
e0a42ebc
EI
332 }
333
334 /* d = b + ~a + c. carry defaults to 1. */
cfeea807 335 tcg_gen_not_i32(na, cpu_R[dc->ra]);
e0a42ebc
EI
336
337 if (dc->rd) {
cfeea807 338 TCGv_i32 ncf = tcg_temp_new_i32();
5d0bb823 339 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
cfeea807
EI
340 tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
341 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
e0a42ebc 342 write_carry(dc, ncf);
cfeea807 343 tcg_temp_free_i32(ncf);
e0a42ebc 344 } else {
5d0bb823 345 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
e0a42ebc 346 write_carry(dc, cf);
4acb54ba 347 }
cfeea807
EI
348 tcg_temp_free_i32(cf);
349 tcg_temp_free_i32(na);
4acb54ba
EI
350}
351
352static void dec_pattern(DisasContext *dc)
353{
354 unsigned int mode;
4acb54ba 355
9ba8cd45
EI
356 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
357 return;
1567a005
EI
358 }
359
4acb54ba
EI
360 mode = dc->opcode & 3;
361 switch (mode) {
362 case 0:
363 /* pcmpbf. */
364 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
365 if (dc->rd)
366 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
367 break;
368 case 2:
369 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
370 if (dc->rd) {
cfeea807 371 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
86112805 372 cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
373 }
374 break;
375 case 3:
376 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
4acb54ba 377 if (dc->rd) {
cfeea807 378 tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
86112805 379 cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
380 }
381 break;
382 default:
0063ebd6 383 cpu_abort(CPU(dc->cpu),
4acb54ba
EI
384 "unsupported pattern insn opcode=%x\n", dc->opcode);
385 break;
386 }
387}
388
389static void dec_and(DisasContext *dc)
390{
391 unsigned int not;
392
393 if (!dc->type_b && (dc->imm & (1 << 10))) {
394 dec_pattern(dc);
395 return;
396 }
397
398 not = dc->opcode & (1 << 1);
399 LOG_DIS("and%s\n", not ? "n" : "");
400
401 if (!dc->rd)
402 return;
403
404 if (not) {
cfeea807 405 tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba 406 } else
cfeea807 407 tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
408}
409
410static void dec_or(DisasContext *dc)
411{
412 if (!dc->type_b && (dc->imm & (1 << 10))) {
413 dec_pattern(dc);
414 return;
415 }
416
417 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
418 if (dc->rd)
cfeea807 419 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
420}
421
422static void dec_xor(DisasContext *dc)
423{
424 if (!dc->type_b && (dc->imm & (1 << 10))) {
425 dec_pattern(dc);
426 return;
427 }
428
429 LOG_DIS("xor r%d\n", dc->rd);
430 if (dc->rd)
cfeea807 431 tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba
EI
432}
433
cfeea807 434static inline void msr_read(DisasContext *dc, TCGv_i32 d)
4acb54ba 435{
3e0e16ae 436 tcg_gen_mov_i32(d, cpu_msr);
4acb54ba
EI
437}
438
cfeea807 439static inline void msr_write(DisasContext *dc, TCGv_i32 v)
4acb54ba
EI
440{
441 dc->cpustate_changed = 1;
3e0e16ae
RH
442 /* PVR bit is not writable, and is never set. */
443 tcg_gen_andi_i32(cpu_msr, v, ~MSR_PVR);
4acb54ba
EI
444}
445
446static void dec_msr(DisasContext *dc)
447{
0063ebd6 448 CPUState *cs = CPU(dc->cpu);
cfeea807 449 TCGv_i32 t0, t1;
2023e9a3 450 unsigned int sr, rn;
f0f7e7f7 451 bool to, clrset, extended = false;
4acb54ba 452
2023e9a3
EI
453 sr = extract32(dc->imm, 0, 14);
454 to = extract32(dc->imm, 14, 1);
455 clrset = extract32(dc->imm, 15, 1) == 0;
4acb54ba 456 dc->type_b = 1;
2023e9a3 457 if (to) {
4acb54ba 458 dc->cpustate_changed = 1;
f0f7e7f7
EI
459 }
460
461 /* Extended MSRs are only available if addr_size > 32. */
462 if (dc->cpu->cfg.addr_size > 32) {
463 /* The E-bit is encoded differently for To/From MSR. */
464 static const unsigned int e_bit[] = { 19, 24 };
465
466 extended = extract32(dc->imm, e_bit[to], 1);
2023e9a3 467 }
4acb54ba
EI
468
469 /* msrclr and msrset. */
2023e9a3
EI
470 if (clrset) {
471 bool clr = extract32(dc->ir, 16, 1);
4acb54ba
EI
472
473 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
474 dc->rd, dc->imm);
1567a005 475
56837509 476 if (!dc->cpu->cfg.use_msr_instr) {
1567a005
EI
477 /* nop??? */
478 return;
479 }
480
bdfc1e88 481 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
1567a005
EI
482 return;
483 }
484
4acb54ba
EI
485 if (dc->rd)
486 msr_read(dc, cpu_R[dc->rd]);
487
cfeea807
EI
488 t0 = tcg_temp_new_i32();
489 t1 = tcg_temp_new_i32();
4acb54ba 490 msr_read(dc, t0);
cfeea807 491 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
4acb54ba
EI
492
493 if (clr) {
cfeea807
EI
494 tcg_gen_not_i32(t1, t1);
495 tcg_gen_and_i32(t0, t0, t1);
4acb54ba 496 } else
cfeea807 497 tcg_gen_or_i32(t0, t0, t1);
4acb54ba 498 msr_write(dc, t0);
cfeea807
EI
499 tcg_temp_free_i32(t0);
500 tcg_temp_free_i32(t1);
0f96e96b 501 tcg_gen_movi_i32(cpu_pc, dc->pc + 4);
4acb54ba
EI
502 dc->is_jmp = DISAS_UPDATE;
503 return;
504 }
505
bdfc1e88
EI
506 if (trap_userspace(dc, to)) {
507 return;
1567a005
EI
508 }
509
4acb54ba
EI
510#if !defined(CONFIG_USER_ONLY)
511 /* Catch read/writes to the mmu block. */
512 if ((sr & ~0xff) == 0x1000) {
f0f7e7f7 513 TCGv_i32 tmp_ext = tcg_const_i32(extended);
05a9a651
EI
514 TCGv_i32 tmp_sr;
515
4acb54ba 516 sr &= 7;
05a9a651 517 tmp_sr = tcg_const_i32(sr);
4acb54ba 518 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
05a9a651 519 if (to) {
f0f7e7f7 520 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
05a9a651 521 } else {
f0f7e7f7 522 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
05a9a651
EI
523 }
524 tcg_temp_free_i32(tmp_sr);
f0f7e7f7 525 tcg_temp_free_i32(tmp_ext);
4acb54ba
EI
526 return;
527 }
528#endif
529
530 if (to) {
531 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
532 switch (sr) {
aa28e6d4 533 case SR_PC:
4acb54ba 534 break;
aa28e6d4 535 case SR_MSR:
4acb54ba
EI
536 msr_write(dc, cpu_R[dc->ra]);
537 break;
351527b7 538 case SR_EAR:
aa28e6d4
RH
539 tcg_gen_extu_i32_i64(cpu_ear, cpu_R[dc->ra]);
540 break;
351527b7 541 case SR_ESR:
aa28e6d4
RH
542 tcg_gen_extu_i32_i64(cpu_esr, cpu_R[dc->ra]);
543 break;
ab6dd380 544 case SR_FSR:
aa28e6d4
RH
545 tcg_gen_extu_i32_i64(cpu_fsr, cpu_R[dc->ra]);
546 break;
547 case SR_BTR:
548 tcg_gen_extu_i32_i64(cpu_btr, cpu_R[dc->ra]);
549 break;
550 case SR_EDR:
551 tcg_gen_extu_i32_i64(cpu_edr, cpu_R[dc->ra]);
4acb54ba 552 break;
5818dee5 553 case 0x800:
cfeea807
EI
554 tcg_gen_st_i32(cpu_R[dc->ra],
555 cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
556 break;
557 case 0x802:
cfeea807
EI
558 tcg_gen_st_i32(cpu_R[dc->ra],
559 cpu_env, offsetof(CPUMBState, shr));
5818dee5 560 break;
4acb54ba 561 default:
0063ebd6 562 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
4acb54ba
EI
563 break;
564 }
565 } else {
566 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
567
568 switch (sr) {
aa28e6d4 569 case SR_PC:
cfeea807 570 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
4acb54ba 571 break;
aa28e6d4 572 case SR_MSR:
4acb54ba
EI
573 msr_read(dc, cpu_R[dc->rd]);
574 break;
351527b7 575 case SR_EAR:
a1b48e3a 576 if (extended) {
aa28e6d4
RH
577 tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_ear);
578 } else {
579 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_ear);
a1b48e3a 580 }
aa28e6d4 581 break;
351527b7 582 case SR_ESR:
aa28e6d4
RH
583 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_esr);
584 break;
351527b7 585 case SR_FSR:
aa28e6d4
RH
586 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_fsr);
587 break;
351527b7 588 case SR_BTR:
aa28e6d4
RH
589 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_btr);
590 break;
7cdae31d 591 case SR_EDR:
aa28e6d4 592 tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_edr);
4acb54ba 593 break;
5818dee5 594 case 0x800:
cfeea807
EI
595 tcg_gen_ld_i32(cpu_R[dc->rd],
596 cpu_env, offsetof(CPUMBState, slr));
5818dee5
EI
597 break;
598 case 0x802:
cfeea807
EI
599 tcg_gen_ld_i32(cpu_R[dc->rd],
600 cpu_env, offsetof(CPUMBState, shr));
5818dee5 601 break;
351527b7 602 case 0x2000 ... 0x200c:
4acb54ba 603 rn = sr & 0xf;
cfeea807 604 tcg_gen_ld_i32(cpu_R[dc->rd],
68cee38a 605 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
4acb54ba
EI
606 break;
607 default:
a47dddd7 608 cpu_abort(cs, "unknown mfs reg %x\n", sr);
4acb54ba
EI
609 break;
610 }
611 }
ee7dbcf8
EI
612
613 if (dc->rd == 0) {
cfeea807 614 tcg_gen_movi_i32(cpu_R[0], 0);
ee7dbcf8 615 }
4acb54ba
EI
616}
617
4acb54ba
EI
618/* Multiplier unit. */
619static void dec_mul(DisasContext *dc)
620{
cfeea807 621 TCGv_i32 tmp;
4acb54ba
EI
622 unsigned int subcode;
623
9ba8cd45 624 if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
1567a005
EI
625 return;
626 }
627
4acb54ba 628 subcode = dc->imm & 3;
4acb54ba
EI
629
630 if (dc->type_b) {
631 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
cfeea807 632 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
16ece88d 633 return;
4acb54ba
EI
634 }
635
1567a005 636 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
9b964318 637 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
1567a005
EI
638 /* nop??? */
639 }
640
cfeea807 641 tmp = tcg_temp_new_i32();
4acb54ba
EI
642 switch (subcode) {
643 case 0:
644 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
cfeea807 645 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
646 break;
647 case 1:
648 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
cfeea807
EI
649 tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
650 cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
651 break;
652 case 2:
653 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
cfeea807
EI
654 tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
655 cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
656 break;
657 case 3:
658 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
cfeea807 659 tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
4acb54ba
EI
660 break;
661 default:
0063ebd6 662 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
4acb54ba
EI
663 break;
664 }
cfeea807 665 tcg_temp_free_i32(tmp);
4acb54ba
EI
666}
667
668/* Div unit. */
669static void dec_div(DisasContext *dc)
670{
671 unsigned int u;
672
673 u = dc->imm & 2;
674 LOG_DIS("div\n");
675
9ba8cd45
EI
676 if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
677 return;
1567a005
EI
678 }
679
4acb54ba 680 if (u)
64254eba
BS
681 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
682 cpu_R[dc->ra]);
4acb54ba 683 else
64254eba
BS
684 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
685 cpu_R[dc->ra]);
4acb54ba 686 if (!dc->rd)
cfeea807 687 tcg_gen_movi_i32(cpu_R[dc->rd], 0);
4acb54ba
EI
688}
689
690static void dec_barrel(DisasContext *dc)
691{
cfeea807 692 TCGv_i32 t0;
faa48d74 693 unsigned int imm_w, imm_s;
d09b2585 694 bool s, t, e = false, i = false;
4acb54ba 695
9ba8cd45 696 if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
1567a005
EI
697 return;
698 }
699
faa48d74
EI
700 if (dc->type_b) {
701 /* Insert and extract are only available in immediate mode. */
d09b2585 702 i = extract32(dc->imm, 15, 1);
faa48d74
EI
703 e = extract32(dc->imm, 14, 1);
704 }
e3e84983
EI
705 s = extract32(dc->imm, 10, 1);
706 t = extract32(dc->imm, 9, 1);
faa48d74
EI
707 imm_w = extract32(dc->imm, 6, 5);
708 imm_s = extract32(dc->imm, 0, 5);
4acb54ba 709
faa48d74
EI
710 LOG_DIS("bs%s%s%s r%d r%d r%d\n",
711 e ? "e" : "",
4acb54ba
EI
712 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
713
faa48d74
EI
714 if (e) {
715 if (imm_w + imm_s > 32 || imm_w == 0) {
716 /* These inputs have an undefined behavior. */
717 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
718 imm_w, imm_s);
719 } else {
720 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
721 }
d09b2585
EI
722 } else if (i) {
723 int width = imm_w - imm_s + 1;
724
725 if (imm_w < imm_s) {
726 /* These inputs have an undefined behavior. */
727 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
728 imm_w, imm_s);
729 } else {
730 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
731 imm_s, width);
732 }
faa48d74 733 } else {
cfeea807 734 t0 = tcg_temp_new_i32();
4acb54ba 735
cfeea807
EI
736 tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
737 tcg_gen_andi_i32(t0, t0, 31);
4acb54ba 738
faa48d74 739 if (s) {
cfeea807 740 tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
2acf6d53 741 } else {
faa48d74 742 if (t) {
cfeea807 743 tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
faa48d74 744 } else {
cfeea807 745 tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
faa48d74 746 }
2acf6d53 747 }
cfeea807 748 tcg_temp_free_i32(t0);
4acb54ba
EI
749 }
750}
751
752static void dec_bit(DisasContext *dc)
753{
0063ebd6 754 CPUState *cs = CPU(dc->cpu);
cfeea807 755 TCGv_i32 t0;
4acb54ba
EI
756 unsigned int op;
757
ace2e4da 758 op = dc->ir & ((1 << 9) - 1);
4acb54ba
EI
759 switch (op) {
760 case 0x21:
761 /* src. */
cfeea807 762 t0 = tcg_temp_new_i32();
4acb54ba
EI
763
764 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
3e0e16ae 765 tcg_gen_andi_i32(t0, cpu_msr, MSR_CC);
09b9f113 766 write_carry(dc, cpu_R[dc->ra]);
4acb54ba 767 if (dc->rd) {
cfeea807
EI
768 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
769 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
4acb54ba 770 }
cfeea807 771 tcg_temp_free_i32(t0);
4acb54ba
EI
772 break;
773
774 case 0x1:
775 case 0x41:
776 /* srl. */
4acb54ba
EI
777 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
778
bb3cb951
EI
779 /* Update carry. Note that write carry only looks at the LSB. */
780 write_carry(dc, cpu_R[dc->ra]);
4acb54ba
EI
781 if (dc->rd) {
782 if (op == 0x41)
cfeea807 783 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
4acb54ba 784 else
cfeea807 785 tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
4acb54ba
EI
786 }
787 break;
788 case 0x60:
789 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
790 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
791 break;
792 case 0x61:
793 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
794 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
795 break;
796 case 0x64:
f062a3c7
EI
797 case 0x66:
798 case 0x74:
799 case 0x76:
4acb54ba
EI
800 /* wdc. */
801 LOG_DIS("wdc r%d\n", dc->ra);
bdfc1e88 802 trap_userspace(dc, true);
4acb54ba
EI
803 break;
804 case 0x68:
805 /* wic. */
806 LOG_DIS("wic r%d\n", dc->ra);
bdfc1e88 807 trap_userspace(dc, true);
4acb54ba 808 break;
48b5e96f 809 case 0xe0:
9ba8cd45
EI
810 if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
811 return;
48b5e96f 812 }
8fc5239e 813 if (dc->cpu->cfg.use_pcmp_instr) {
5318420c 814 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
48b5e96f
EI
815 }
816 break;
ace2e4da
PC
817 case 0x1e0:
818 /* swapb */
819 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
820 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
821 break;
b8c6a5d9 822 case 0x1e2:
ace2e4da
PC
823 /*swaph */
824 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
825 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
826 break;
4acb54ba 827 default:
a47dddd7
AF
828 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
829 dc->pc, op, dc->rd, dc->ra, dc->rb);
4acb54ba
EI
830 break;
831 }
832}
833
834static inline void sync_jmpstate(DisasContext *dc)
835{
844bab60
EI
836 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
837 if (dc->jmp == JMP_DIRECT) {
cfeea807 838 tcg_gen_movi_i32(env_btaken, 1);
844bab60 839 }
23979dc5 840 dc->jmp = JMP_INDIRECT;
0f96e96b 841 tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
4acb54ba
EI
842 }
843}
844
845static void dec_imm(DisasContext *dc)
846{
847 LOG_DIS("imm %x\n", dc->imm << 16);
cfeea807 848 tcg_gen_movi_i32(env_imm, (dc->imm << 16));
4acb54ba
EI
849 dc->tb_flags |= IMM_FLAG;
850 dc->clear_imm = 0;
851}
852
d248e1be 853static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
4acb54ba 854{
0e9033c8
EI
855 bool extimm = dc->tb_flags & IMM_FLAG;
856 /* Should be set to true if r1 is used by loadstores. */
857 bool stackprot = false;
403322ea 858 TCGv_i32 t32;
5818dee5
EI
859
860 /* All load/stores use ra. */
9aaaa181 861 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
0e9033c8 862 stackprot = true;
5818dee5 863 }
4acb54ba 864
9ef55357 865 /* Treat the common cases first. */
4acb54ba 866 if (!dc->type_b) {
d248e1be
EI
867 if (ea) {
868 int addr_size = dc->cpu->cfg.addr_size;
869
870 if (addr_size == 32) {
871 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
872 return;
873 }
874
875 tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
876 if (addr_size < 64) {
877 /* Mask off out of range bits. */
878 tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
879 }
880 return;
881 }
882
0dc4af5c 883 /* If any of the regs is r0, set t to the value of the other reg. */
4b5ef0b5 884 if (dc->ra == 0) {
403322ea 885 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
0dc4af5c 886 return;
4b5ef0b5 887 } else if (dc->rb == 0) {
403322ea 888 tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
0dc4af5c 889 return;
4b5ef0b5
EI
890 }
891
9aaaa181 892 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
0e9033c8 893 stackprot = true;
5818dee5
EI
894 }
895
403322ea
EI
896 t32 = tcg_temp_new_i32();
897 tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
898 tcg_gen_extu_i32_tl(t, t32);
899 tcg_temp_free_i32(t32);
5818dee5
EI
900
901 if (stackprot) {
0a87e691 902 gen_helper_stackprot(cpu_env, t);
5818dee5 903 }
0dc4af5c 904 return;
4acb54ba
EI
905 }
906 /* Immediate. */
403322ea 907 t32 = tcg_temp_new_i32();
4acb54ba 908 if (!extimm) {
f7a66e3a 909 tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
4acb54ba 910 } else {
403322ea 911 tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
4acb54ba 912 }
403322ea
EI
913 tcg_gen_extu_i32_tl(t, t32);
914 tcg_temp_free_i32(t32);
4acb54ba 915
5818dee5 916 if (stackprot) {
0a87e691 917 gen_helper_stackprot(cpu_env, t);
5818dee5 918 }
0dc4af5c 919 return;
4acb54ba
EI
920}
921
922static void dec_load(DisasContext *dc)
923{
403322ea
EI
924 TCGv_i32 v;
925 TCGv addr;
8534063a 926 unsigned int size;
d248e1be
EI
927 bool rev = false, ex = false, ea = false;
928 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
14776ab5 929 MemOp mop;
4acb54ba 930
47acdd63
RH
931 mop = dc->opcode & 3;
932 size = 1 << mop;
9f8beb66 933 if (!dc->type_b) {
d248e1be 934 ea = extract32(dc->ir, 7, 1);
8534063a
EI
935 rev = extract32(dc->ir, 9, 1);
936 ex = extract32(dc->ir, 10, 1);
9f8beb66 937 }
47acdd63
RH
938 mop |= MO_TE;
939 if (rev) {
940 mop ^= MO_BSWAP;
941 }
9f8beb66 942
9ba8cd45 943 if (trap_illegal(dc, size > 4)) {
0187688f
EI
944 return;
945 }
4acb54ba 946
d248e1be
EI
947 if (trap_userspace(dc, ea)) {
948 return;
949 }
950
951 LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
952 ex ? "x" : "",
953 ea ? "ea" : "");
9f8beb66 954
4acb54ba 955 t_sync_flags(dc);
403322ea 956 addr = tcg_temp_new();
d248e1be
EI
957 compute_ldst_addr(dc, ea, addr);
958 /* Extended addressing bypasses the MMU. */
959 mem_index = ea ? MMU_NOMMU_IDX : mem_index;
4acb54ba 960
9f8beb66
EI
961 /*
962 * When doing reverse accesses we need to do two things.
963 *
4ff9786c 964 * 1. Reverse the address wrt endianness.
9f8beb66
EI
965 * 2. Byteswap the data lanes on the way back into the CPU core.
966 */
967 if (rev && size != 4) {
968 /* Endian reverse the address. t is addr. */
969 switch (size) {
970 case 1:
971 {
a6338015 972 tcg_gen_xori_tl(addr, addr, 3);
9f8beb66
EI
973 break;
974 }
975
976 case 2:
977 /* 00 -> 10
978 10 -> 00. */
403322ea 979 tcg_gen_xori_tl(addr, addr, 2);
9f8beb66
EI
980 break;
981 default:
0063ebd6 982 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
9f8beb66
EI
983 break;
984 }
985 }
986
8cc9b43f
PC
987 /* lwx does not throw unaligned access errors, so force alignment */
988 if (ex) {
403322ea 989 tcg_gen_andi_tl(addr, addr, ~3);
8cc9b43f
PC
990 }
991
4acb54ba
EI
992 /* If we get a fault on a dslot, the jmpstate better be in sync. */
993 sync_jmpstate(dc);
968a40f6
EI
994
995 /* Verify alignment if needed. */
47acdd63
RH
996 /*
997 * Microblaze gives MMU faults priority over faults due to
998 * unaligned addresses. That's why we speculatively do the load
999 * into v. If the load succeeds, we verify alignment of the
1000 * address and if that succeeds we write into the destination reg.
1001 */
cfeea807 1002 v = tcg_temp_new_i32();
d248e1be 1003 tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
a12f6507 1004
1507e5f6 1005 if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
a6338015
EI
1006 TCGv_i32 t0 = tcg_const_i32(0);
1007 TCGv_i32 treg = tcg_const_i32(dc->rd);
1008 TCGv_i32 tsize = tcg_const_i32(size - 1);
1009
0f96e96b 1010 tcg_gen_movi_i32(cpu_pc, dc->pc);
a6338015
EI
1011 gen_helper_memalign(cpu_env, addr, treg, t0, tsize);
1012
1013 tcg_temp_free_i32(t0);
1014 tcg_temp_free_i32(treg);
1015 tcg_temp_free_i32(tsize);
4acb54ba
EI
1016 }
1017
47acdd63 1018 if (ex) {
403322ea 1019 tcg_gen_mov_tl(env_res_addr, addr);
cfeea807 1020 tcg_gen_mov_i32(env_res_val, v);
47acdd63
RH
1021 }
1022 if (dc->rd) {
cfeea807 1023 tcg_gen_mov_i32(cpu_R[dc->rd], v);
47acdd63 1024 }
cfeea807 1025 tcg_temp_free_i32(v);
47acdd63 1026
8cc9b43f 1027 if (ex) { /* lwx */
b6af0975 1028 /* no support for AXI exclusive so always clear C */
8cc9b43f 1029 write_carryi(dc, 0);
8cc9b43f
PC
1030 }
1031
403322ea 1032 tcg_temp_free(addr);
4acb54ba
EI
1033}
1034
4acb54ba
EI
/*
 * Decode and translate a store insn (sb/sh/sw and the i/r/x/ea variants).
 * Emits TCG code that computes the effective address, handles byte-reversed
 * addressing, the swx store-exclusive sequence, and (optionally) the
 * unaligned-access check.
 */
static void dec_store(DisasContext *dc)
{
    TCGv addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    MemOp mop;

    /* Low two opcode bits encode the access size as a MemOp (1/2/4 bytes). */
    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        /* Register-form stores carry extra mode bits in the insn word:
           ea = extended addressing, rev = byte-reversed, ex = exclusive. */
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        /* Reversed stores flip the target endianness. */
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    /* Extended addressing is privileged; raises a trap in user mode. */
    trap_userspace(dc, ea);

    LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local (value must survive the branch below).  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        /* Assume failure (C=1) until the cmpxchg below proves success. */
        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        /* Address must match the reservation made by lwx. */
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /*
         * Compare the value loaded at lwx with current contents of
         * the reserved location.
         */
        tval = tcg_temp_new_i32();

        tcg_gen_atomic_cmpxchg_i32(tval, addr, env_res_val,
                                   cpu_R[dc->rd], mem_index,
                                   mop);

        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        /* Exchange succeeded: clear carry to report success. */
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                tcg_gen_xori_tl(addr, addr, 3);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* Exclusive stores are performed by the cmpxchg above. */
    if (!ex) {
        tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
    }

    /* Verify alignment if needed.  */
    if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
        TCGv_i32 t1 = tcg_const_i32(1);
        TCGv_i32 treg = tcg_const_i32(dc->rd);
        TCGv_i32 tsize = tcg_const_i32(size - 1);

        tcg_gen_movi_i32(cpu_pc, dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, thay way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, treg, t1, tsize);

        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);
    }

    if (ex) {
        gen_set_label(swx_skip);
    }

    tcg_temp_free(addr);
}
1150
/*
 * Emit TCG code setting d = (a <cc> 0), where cc is a MicroBlaze branch
 * condition code.  Unknown condition codes abort translation.
 */
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a)
{
    /* Map MicroBlaze condition codes onto TCG compare conditions. */
    static const int mb_to_tcg_cc[] = {
        [CC_EQ] = TCG_COND_EQ,
        [CC_NE] = TCG_COND_NE,
        [CC_LT] = TCG_COND_LT,
        [CC_LE] = TCG_COND_LE,
        [CC_GE] = TCG_COND_GE,
        [CC_GT] = TCG_COND_GT,
    };

    switch (cc) {
    case CC_EQ:
    case CC_NE:
    case CC_LT:
    case CC_LE:
    case CC_GE:
    case CC_GT:
        /* All comparisons are against zero. */
        tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
        break;
    }
}
1177
0f96e96b 1178static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
4acb54ba 1179{
0f96e96b 1180 TCGv_i32 zero = tcg_const_i32(0);
e956caf2 1181
0f96e96b
RH
1182 tcg_gen_movcond_i32(TCG_COND_NE, cpu_pc,
1183 env_btaken, zero,
e956caf2
EI
1184 pc_true, pc_false);
1185
0f96e96b 1186 tcg_temp_free_i32(zero);
4acb54ba
EI
1187}
1188
f91c60f0
EI
1189static void dec_setup_dslot(DisasContext *dc)
1190{
1191 TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));
1192
1193 dc->delayed_branch = 2;
1194 dc->tb_flags |= D_FLAG;
1195
1196 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
1197 tcg_temp_free_i32(tmp);
1198}
1199
4acb54ba
EI
/*
 * Decode a conditional branch (beq/bne/blt/... and the d-slot variants).
 * Computes the branch target (direct when the offset is a small immediate,
 * indirect otherwise) and evaluates the condition into env_btaken.
 */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dec_setup_dslot(dc);
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        /* Target is known at translation time: allow direct TB chaining. */
        tcg_gen_movi_i32(cpu_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        /* Target depends on a runtime register value. */
        dc->jmp = JMP_INDIRECT;
        tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
    }
    /* env_btaken = (rA <cc> 0). */
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
}
1226
/*
 * Decode unconditional branches (br/bra/brl/... with optional delay slot)
 * plus the mbar/sleep encodings that share this opcode space.
 */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* The mbar immediate is carried in the rd field. */
        uint16_t mbar_imm = dc->rd;

        LOG_DIS("mbar %d\n", mbar_imm);

        /* Data access memory barrier.  */
        if ((mbar_imm & 2) == 0) {
            tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
        }

        /* mbar IMM & 16 decodes to sleep.  */
        if (mbar_imm & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            if (trap_userspace(dc, true)) {
                /* Sleep is a privileged instruction.  */
                return;
            }

            t_sync_flags(dc);
            /* Mark the CPU halted; offset trick reaches CPUState from env. */
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i32(cpu_pc, dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dec_setup_dslot(dc);
    }
    /* Link: save the return address in rd (r0 stays hardwired to zero). */
    if (link && dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        /* Absolute branch: target comes straight from operand b. */
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_mov_i32(cpu_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            /* brki with imm 8/0x18 is the software-break entry point. */
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                /* brki rd, 0 is the (privileged) debug trap. */
                if (trap_userspace(dc, true)) {
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            /* PC-relative with a constant offset: direct TB chaining. */
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
        }
    }
}
1311
1312static inline void do_rti(DisasContext *dc)
1313{
cfeea807
EI
1314 TCGv_i32 t0, t1;
1315 t0 = tcg_temp_new_i32();
1316 t1 = tcg_temp_new_i32();
3e0e16ae 1317 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf
EI
1318 tcg_gen_shri_i32(t0, t1, 1);
1319 tcg_gen_ori_i32(t1, t1, MSR_IE);
cfeea807
EI
1320 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1321
1322 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1323 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1324 msr_write(dc, t1);
cfeea807
EI
1325 tcg_temp_free_i32(t1);
1326 tcg_temp_free_i32(t0);
4acb54ba
EI
1327 dc->tb_flags &= ~DRTI_FLAG;
1328}
1329
1330static inline void do_rtb(DisasContext *dc)
1331{
cfeea807
EI
1332 TCGv_i32 t0, t1;
1333 t0 = tcg_temp_new_i32();
1334 t1 = tcg_temp_new_i32();
3e0e16ae 1335 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf 1336 tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
cfeea807
EI
1337 tcg_gen_shri_i32(t0, t1, 1);
1338 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1339
1340 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1341 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1342 msr_write(dc, t1);
cfeea807
EI
1343 tcg_temp_free_i32(t1);
1344 tcg_temp_free_i32(t0);
4acb54ba
EI
1345 dc->tb_flags &= ~DRTB_FLAG;
1346}
1347
1348static inline void do_rte(DisasContext *dc)
1349{
cfeea807
EI
1350 TCGv_i32 t0, t1;
1351 t0 = tcg_temp_new_i32();
1352 t1 = tcg_temp_new_i32();
4acb54ba 1353
3e0e16ae 1354 tcg_gen_mov_i32(t1, cpu_msr);
0a22f8cf 1355 tcg_gen_ori_i32(t1, t1, MSR_EE);
cfeea807
EI
1356 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1357 tcg_gen_shri_i32(t0, t1, 1);
1358 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
4acb54ba 1359
cfeea807
EI
1360 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1361 tcg_gen_or_i32(t1, t1, t0);
4acb54ba 1362 msr_write(dc, t1);
cfeea807
EI
1363 tcg_temp_free_i32(t1);
1364 tcg_temp_free_i32(t0);
4acb54ba
EI
1365 dc->tb_flags &= ~DRTE_FLAG;
1366}
1367
1368static void dec_rts(DisasContext *dc)
1369{
1370 unsigned int b_bit, i_bit, e_bit;
1371
1372 i_bit = dc->ir & (1 << 21);
1373 b_bit = dc->ir & (1 << 22);
1374 e_bit = dc->ir & (1 << 23);
1375
bdfc1e88
EI
1376 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1377 return;
1378 }
1379
f91c60f0 1380 dec_setup_dslot(dc);
4acb54ba
EI
1381
1382 if (i_bit) {
1383 LOG_DIS("rtid ir=%x\n", dc->ir);
1384 dc->tb_flags |= DRTI_FLAG;
1385 } else if (b_bit) {
1386 LOG_DIS("rtbd ir=%x\n", dc->ir);
1387 dc->tb_flags |= DRTB_FLAG;
1388 } else if (e_bit) {
1389 LOG_DIS("rted ir=%x\n", dc->ir);
1390 dc->tb_flags |= DRTE_FLAG;
1391 } else
1392 LOG_DIS("rts ir=%x\n", dc->ir);
1393
23979dc5 1394 dc->jmp = JMP_INDIRECT;
cfeea807 1395 tcg_gen_movi_i32(env_btaken, 1);
0f96e96b 1396 tcg_gen_add_i32(cpu_btarget, cpu_R[dc->ra], *dec_alu_op_b(dc));
4acb54ba
EI
1397}
1398
97694c57
EI
1399static int dec_check_fpuv2(DisasContext *dc)
1400{
be67e9ab 1401 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
aa28e6d4 1402 tcg_gen_movi_i64(cpu_esr, ESR_EC_FPU);
97694c57
EI
1403 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1404 }
2016a6a7 1405 return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
97694c57
EI
1406}
1407
1567a005
EI
/*
 * Decode the FPU insn group: fadd/frsub/fmul/fdiv, the fcmp.* family, and
 * the FPU-v2-only flt/fint/fsqrt.  Traps if the core has no FPU configured.
 */
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
        return;
    }

    /* Sub-opcode selecting the FPU operation. */
    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
    case 0:
        gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 1:
        gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                         cpu_R[dc->rb]);
        break;

    case 2:
        gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 3:
        gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 4:
        /* fcmp family; bits 4..6 select the comparison. */
        switch ((dc->ir >> 4) & 7) {
        case 0:
            gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 4:
            gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 5:
            gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 6:
            gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            qemu_log_mask(LOG_UNIMP,
                          "unimplemented fcmp fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
        }
        break;

    case 5:
        /* flt/fint/fsqrt require an FPU v2. */
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    case 6:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    case 7:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    default:
        qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                      " opc=%x\n",
                      fpu_insn, dc->pc, dc->opcode);
        dc->abort_at_next_insn = 1;
        break;
    }
}
1508
4acb54ba
EI
1509static void dec_null(DisasContext *dc)
1510{
9ba8cd45 1511 if (trap_illegal(dc, true)) {
02b33596
EI
1512 return;
1513 }
1d512a65 1514 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
4acb54ba
EI
1515 dc->abort_at_next_insn = 1;
1516}
1517
6d76d23e
EI
1518/* Insns connected to FSL or AXI stream attached devices. */
1519static void dec_stream(DisasContext *dc)
1520{
6d76d23e
EI
1521 TCGv_i32 t_id, t_ctrl;
1522 int ctrl;
1523
1524 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1525 dc->type_b ? "" : "d", dc->imm);
1526
bdfc1e88 1527 if (trap_userspace(dc, true)) {
6d76d23e
EI
1528 return;
1529 }
1530
cfeea807 1531 t_id = tcg_temp_new_i32();
6d76d23e 1532 if (dc->type_b) {
cfeea807 1533 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
6d76d23e
EI
1534 ctrl = dc->imm >> 10;
1535 } else {
cfeea807 1536 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
6d76d23e
EI
1537 ctrl = dc->imm >> 5;
1538 }
1539
cfeea807 1540 t_ctrl = tcg_const_i32(ctrl);
6d76d23e
EI
1541
1542 if (dc->rd == 0) {
1543 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1544 } else {
1545 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1546 }
cfeea807
EI
1547 tcg_temp_free_i32(t_id);
1548 tcg_temp_free_i32(t_ctrl);
6d76d23e
EI
1549}
1550
4acb54ba
EI
/*
 * Decoder dispatch table.  Each entry pairs an opcode pattern (bits/mask)
 * with its decode handler; decode() scans the table in order and the first
 * match wins.  The terminating {0, 0} mask matches every opcode and routes
 * anything unrecognized to dec_null.
 */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1578
64254eba 1579static inline void decode(DisasContext *dc, uint32_t ir)
4acb54ba 1580{
4acb54ba
EI
1581 int i;
1582
64254eba 1583 dc->ir = ir;
4acb54ba
EI
1584 LOG_DIS("%8.8x\t", dc->ir);
1585
462c2544 1586 if (ir == 0) {
1ee1bd28 1587 trap_illegal(dc, dc->cpu->cfg.opcode_0_illegal);
462c2544
EI
1588 /* Don't decode nop/zero instructions any further. */
1589 return;
4acb54ba 1590 }
462c2544 1591
4acb54ba
EI
1592 /* bit 2 seems to indicate insn type. */
1593 dc->type_b = ir & (1 << 29);
1594
1595 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1596 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1597 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1598 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1599 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1600
1601 /* Large switch for all insns. */
1602 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1603 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1604 decinfo[i].dec(dc);
1605 break;
1606 }
1607 }
1608}
1609
4acb54ba 1610/* generate intermediate code for basic block 'tb'. */
/*
 * Generate intermediate code for basic block 'tb': translate insns starting
 * at tb->pc until a branch, a cpu-state change, a page boundary, or the
 * insn budget ends the block, then emit the block epilogue.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = env_archcpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* Resuming inside a delay slot: the pending branch state carries over. */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i32(cpu_pc, dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        /* decode() may keep the imm prefix alive by clearing clear_imm. */
        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* The delay slot has completed: apply any pending MSR
                   restore and emit the actual branch. */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    TCGv_i32 tmp_pc = tcg_const_i32(dc->pc);
                    eval_cond_jmp(dc, cpu_btarget, tmp_pc);
                    tcg_temp_free_i32(tmp_pc);
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* The TB ended inside a delay slot; fall back to an indirect
               exit so the branch resolves on the next TB. */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i32(cpu_pc, npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i32(cpu_pc, npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i32(cpu_pc, npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(NULL, 0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        FILE *logfile = qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock(logfile);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1787
90c84c56 1788void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
4acb54ba 1789{
878096ee
AF
1790 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1791 CPUMBState *env = &cpu->env;
4acb54ba
EI
1792 int i;
1793
90c84c56 1794 if (!env) {
4acb54ba 1795 return;
90c84c56 1796 }
4acb54ba 1797
0f96e96b 1798 qemu_fprintf(f, "IN: PC=%x %s\n",
76e8187d 1799 env->pc, lookup_symbol(env->pc));
3e0e16ae 1800 qemu_fprintf(f, "rmsr=%x resr=%" PRIx64 " rear=%" PRIx64 " "
2ead1b18
JK
1801 "debug=%x imm=%x iflags=%x fsr=%" PRIx64 " "
1802 "rbtr=%" PRIx64 "\n",
78e9caf2 1803 env->msr, env->esr, env->ear,
5a8e0136 1804 env->debug, env->imm, env->iflags, env->fsr,
6fbf78f2 1805 env->btr);
0f96e96b 1806 qemu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
90c84c56 1807 env->btaken, env->btarget,
2e5282ca
RH
1808 (env->msr & MSR_UM) ? "user" : "kernel",
1809 (env->msr & MSR_UMS) ? "user" : "kernel",
1810 (bool)(env->msr & MSR_EIP),
1811 (bool)(env->msr & MSR_IE));
2ead1b18
JK
1812 for (i = 0; i < 12; i++) {
1813 qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
1814 if ((i + 1) % 4 == 0) {
1815 qemu_fprintf(f, "\n");
1816 }
1817 }
17c52a43 1818
2ead1b18
JK
1819 /* Registers that aren't modeled are reported as 0 */
1820 qemu_fprintf(f, "redr=%" PRIx64 " rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
af20a93a 1821 "rtlblo=0 rtlbhi=0\n", env->edr);
2ead1b18 1822 qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);
4acb54ba 1823 for (i = 0; i < 32; i++) {
90c84c56 1824 qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
4acb54ba 1825 if ((i + 1) % 4 == 0)
90c84c56 1826 qemu_fprintf(f, "\n");
4acb54ba 1827 }
90c84c56 1828 qemu_fprintf(f, "\n\n");
4acb54ba
EI
1829}
1830
cd0c24f9
AF
/*
 * Create the TCG globals backing the fields of CPUMBState: translation
 * bookkeeping (debug/iflags/imm/branch state), the load-exclusive
 * reservation, the 32 GPRs, and the special registers.
 */
void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUMBState, debug),
                                       "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                                        offsetof(CPUMBState, iflags),
                                        "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUMBState, imm),
                                     "imm");
    cpu_btarget = tcg_global_mem_new_i32(cpu_env,
                                         offsetof(CPUMBState, btarget),
                                         "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                                        offsetof(CPUMBState, btaken),
                                        "btaken");
    /* lwx/swx reservation address and value. */
    env_res_addr = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUMBState, res_addr),
                                      "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                                         offsetof(CPUMBState, res_val),
                                         "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUMBState, regs[i]),
                                          regnames[i]);
    }

    /* Special registers; note MSR is 32 bits wide, the others 64. */
    cpu_pc =
        tcg_global_mem_new_i32(cpu_env, offsetof(CPUMBState, pc), "rpc");
    cpu_msr =
        tcg_global_mem_new_i32(cpu_env, offsetof(CPUMBState, msr), "rmsr");
    cpu_ear =
        tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, ear), "rear");
    cpu_esr =
        tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, esr), "resr");
    cpu_fsr =
        tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, fsr), "rfsr");
    cpu_btr =
        tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, btr), "rbtr");
    cpu_edr =
        tcg_global_mem_new_i64(cpu_env, offsetof(CPUMBState, edr), "redr");
}
1877
bad729e2
RH
/*
 * Restore CPU state from the values recorded by tcg_gen_insn_start() when
 * unwinding after an exception; for MicroBlaze only the PC is recorded.
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}