[qemu.git] / target-microblaze / translate.c (commit: microblaze: Improve addkc)
/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "helper.h"
#include "microblaze-decode.h"
#include "qemu-common.h"

#define GEN_HELPER 1
#include "helper.h"

#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
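
/* Example: decode() below uses EXTRACT_FIELD(ir, 26, 31) to pull the
   6-bit major opcode out of the top of the instruction word. */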

static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;

#include "gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    CPUState *env;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

/* Sign extend at translation time.  */
static inline int sign_extend(unsigned int val, unsigned int width)
{
    int sval;

    /* LSL.  */
    val <<= 31 - width;
    sval = val;
    /* ASR.  */
    sval >>= 31 - width;
    return sval;
}
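
/*
 * Example: sign_extend(0x8000, 15) shifts left so that bit 15 lands in
 * bit 31 (0x80000000), then shifts back arithmetically, yielding
 * 0xffff8000, i.e. -32768.
 */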

static inline void t_sync_flags(DisasContext *dc)
{
    /* Sync the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
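
/*
 * Direct TB chaining (goto_tb) is only used when source and destination
 * share a guest page, so the mapping cannot change under us; the
 * (long)tb + n return value tells the execution loop which of this TB's
 * two jump slots to patch.  Cross-page jumps exit with 0 and go through
 * the TB hash lookup instead.
 */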

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
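
/*
 * The carry lives in two MSR bits, MSR_C and the carry-copy MSR_CC in
 * the top bit.  read_carry() reads the copy (bit 31); write_carry()
 * broadcasts bit 0 of v across a full-width mask so that both bits are
 * set or cleared together.
 */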

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
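
/*
 * Operand b of a type A insn is a register; for type B insns it is a
 * sign-extended 16-bit immediate, optionally widened to 32 bits by a
 * preceding imm prefix (IMM_FLAG).  dec_alu_op_b() hides the difference
 * by returning a pointer to either the register or env_imm.
 */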

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_addkc(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_addkc(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
    } else {
        LOG_DIS("sub%s%s r%d, r%d r%d\n",
                k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

        if (!k || c) {
            TCGv t;
            t = tcg_temp_new();
            if (dc->rd)
                gen_helper_subkc(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)),
                                 tcg_const_tl(k), tcg_const_tl(c));
            else
                gen_helper_subkc(t, cpu_R[dc->ra], *(dec_alu_op_b(dc)),
                                 tcg_const_tl(k), tcg_const_tl(c));
            tcg_temp_free(t);
        }
        else if (dc->rd)
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
    }
}
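
/*
 * Note the reversed operand order in the subtract above: MicroBlaze
 * subtracts are "reverse subtracts", rd = rb - ra (or imm - ra for
 * rsubi), which is why operand b comes first.
 */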

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    int l1;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                l1 = gen_new_label();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_EQ,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            l1 = gen_new_label();
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_NE,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        default:
            cpu_abort(dc->env,
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        TCGv t = tcg_temp_new();
        tcg_gen_not_tl(t, *(dec_alu_op_b(dc)));
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], t);
        tcg_temp_free(t);
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    dc->cpustate_changed = 1;
    tcg_gen_mov_tl(cpu_SR[SR_MSR], v);
    /* PVR, we have a processor version register.  */
    tcg_gen_ori_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], (1 << 10));
}

static void dec_msr(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(dc->env);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            default:
                cpu_abort(dc->env, "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
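            /* Note: approximated here with a signed x signed multiply;
               an exact mulhsu would need a signed x unsigned product.  */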
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}
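
/*
 * Barrel shifter decode: s selects shift left (bsll) vs right; for
 * right shifts, t selects arithmetic (bsra) vs logical (bsrl).  Only
 * the low five bits of operand b are used as the shift amount.
 */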

static void dec_bit(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int op;
    int mem_index = cpu_mmu_index(dc->env);

    op = dc->ir & ((1 << 8) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);
            if (dc->rd) {
                t1 = tcg_temp_new();
                read_carry(dc, t1);
                tcg_gen_shli_tl(t1, t1, 31);

                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t1);
                tcg_temp_free(t1);
            }

            /* Update carry.  */
            write_carry(dc, t0);
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            t0 = tcg_temp_new();
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry.  */
            tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);
            write_carry(dc, t0);
            tcg_temp_free(t0);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        default:
            cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
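
/*
 * Example: the sequence
 *     imm   0x1234
 *     addi  r3, r4, 0x5678
 * leaves 0x12340000 in env_imm with IMM_FLAG set; dec_alu_op_b() then
 * ORs in the low 16 bits, so the addi sees the full 32-bit immediate
 * 0x12345678.
 */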

static inline void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
                            unsigned int size)
{
    int mem_index = cpu_mmu_index(dc->env);

    if (size == 1) {
        tcg_gen_qemu_ld8u(dst, addr, mem_index);
    } else if (size == 2) {
        tcg_gen_qemu_ld16u(dst, addr, mem_index);
    } else if (size == 4) {
        tcg_gen_qemu_ld32u(dst, addr, mem_index);
    } else
        cpu_abort(dc->env, "Incorrect load size %d\n", size);
}

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    return t;
}

static inline void dec_byteswap(DisasContext *dc, TCGv dst, TCGv src, int size)
{
    if (size == 4) {
        tcg_gen_bswap32_tl(dst, src);
    } else if (size == 2) {
        TCGv t = tcg_temp_new();

        /* bswap16 assumes the high bits are zero.  */
        tcg_gen_andi_tl(t, src, 0xffff);
        tcg_gen_bswap16_tl(dst, t);
        tcg_temp_free(t);
    } else {
        /* Ignore.
        cpu_abort(dc->env, "Invalid ldst byteswap size %d\n", size);
        */
    }
}

static void dec_load(DisasContext *dc)
{
    TCGv t, *addr;
    unsigned int size, rev = 0;

    size = 1 << (dc->opcode & 3);

    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(dc->env, "Invalid reverse size\n");
                break;
        }
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        TCGv v = tcg_temp_new();

        /*
         * Microblaze gives MMU faults priority over faults due to
         * unaligned addresses.  That's why we speculatively do the load
         * into v.  If the load succeeds, we verify alignment of the
         * address and if that succeeds we write into the destination reg.
         */
        gen_load(dc, v, *addr, size);

        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
        if (dc->rd) {
            if (rev) {
                dec_byteswap(dc, cpu_R[dc->rd], v, size);
            } else {
                tcg_gen_mov_tl(cpu_R[dc->rd], v);
            }
        }
        tcg_temp_free(v);
    } else {
        if (dc->rd) {
            gen_load(dc, cpu_R[dc->rd], *addr, size);
            if (rev) {
                dec_byteswap(dc, cpu_R[dc->rd], cpu_R[dc->rd], size);
            }
        } else {
            /* We are loading into r0, no need to reverse.  */
            gen_load(dc, env_imm, *addr, size);
        }
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static void gen_store(DisasContext *dc, TCGv addr, TCGv val,
                      unsigned int size)
{
    int mem_index = cpu_mmu_index(dc->env);

    if (size == 1)
        tcg_gen_qemu_st8(val, addr, mem_index);
    else if (size == 2) {
        tcg_gen_qemu_st16(val, addr, mem_index);
    } else if (size == 4) {
        tcg_gen_qemu_st32(val, addr, mem_index);
    } else
        cpu_abort(dc->env, "Incorrect store size %d\n", size);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr;
    unsigned int size, rev = 0;

    size = 1 << (dc->opcode & 3);
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(dc->env, "Invalid reverse size\n");
                break;
        }

        if (size != 1) {
            TCGv bs_data = tcg_temp_new();
            dec_byteswap(dc, bs_data, cpu_R[dc->rd], size);
            gen_store(dc, *addr, bs_data, size);
            tcg_temp_free(bs_data);
        } else {
            gen_store(dc, *addr, cpu_R[dc->rd], size);
        }
    } else {
        if (rev) {
            TCGv bs_data = tcg_temp_new();
            dec_byteswap(dc, bs_data, cpu_R[dc->rd], size);
            gen_store(dc, *addr, bs_data, size);
            tcg_temp_free(bs_data);
        } else {
            gen_store(dc, *addr, cpu_R[dc->rd], size);
        }
    }

    /* Verify alignment if needed.  */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory.  One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
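
/*
 * delayed_branch counts the insns left until the branch resolves: 2
 * means a delay slot follows.  The translation loop decrements it per
 * insn and emits the actual control transfer when it reaches zero.
 */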

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs;
    int mem_index = cpu_mmu_index(dc->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);
    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
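
/*
 * Common pattern in the three returns above: the saved mode bits (UMS,
 * VMS) sit one position above their active counterparts in the MSR, so
 * shifting right by one and masking with MSR_VM | MSR_UM restores the
 * pre-exception mode.  Each flavour then adjusts its own status bit:
 * rti re-enables IE, rtb clears BIP, rte sets EE and clears EIP.
 */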

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(dc->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    int r;

    r = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK;

    if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return r;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd],
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log("unimplemented fcmp fpu_insn=%x pc=%x opc=%x\n",
                             fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;

        default:
            qemu_log("unimplemented FPU insn fpu_insn=%x pc=%x opc=%x\n",
                     fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {{0, 0}, dec_null}
};
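
/*
 * decode() tries the entries above in order; the first whose mask/bits
 * pair matches dc->opcode wins.  The all-zero sentinel at the end
 * matches anything, so unknown opcodes fall through to dec_null().
 */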

static inline void decode(DisasContext *dc)
{
    uint32_t ir;
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(dc->pc);

    dc->ir = ir = ldl_code(dc->pc);
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4)
            cpu_abort(dc->env, "fetching nop sequence\n");
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

static void check_breakpoint(CPUState *env, DisasContext *dc)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
static void
gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
                               int search_pc)
{
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    qemu_log_try_set_file(stderr);

    pc_start = tb->pc;
    dc->env = env;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3)
        cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(env, 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do
    {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        dc->clear_imm = 1;
        decode(dc);
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    int l1;

                    t_sync_flags(dc);
                    l1 = gen_new_label();
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (env->singlestep_enabled)
            break;
    } while (!dc->is_jmp && !dc->cpustate_changed
             && gen_opc_ptr < gen_opc_end
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(env->singlestep_enabled)) {
        t_gen_raise_exception(dc, EXCP_DEBUG);
        if (dc->is_jmp == DISAS_NEXT)
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
                 dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

CPUState *cpu_mb_init(const char *cpu_model)
{
    CPUState *env;
    static int tcg_initialized = 0;
    int i;

    env = qemu_mallocz(sizeof(CPUState));

    cpu_exec_init(env);
    cpu_reset(env);
    set_float_rounding_mode(float_round_nearest_even, &env->fp_status);

    if (tcg_initialized)
        return env;

    tcg_initialized = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUState, btaken),
                     "btaken");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUState, sregs[i]),
                          special_regnames[i]);
    }
#define GEN_HELPER 2
#include "helper.h"

    return env;
}

void cpu_reset(CPUState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    memset(env, 0, offsetof(CPUMBState, breakpoints));
    tlb_flush(env, 1);

    env->pvr.regs[0] = PVR0_PVR_FULL_MASK \
                       | PVR0_USE_BARREL_MASK \
                       | PVR0_USE_DIV_MASK \
                       | PVR0_USE_HW_MUL_MASK \
                       | PVR0_USE_EXC_MASK \
                       | PVR0_USE_ICACHE_MASK \
                       | PVR0_USE_DCACHE_MASK \
                       | PVR0_USE_MMU \
                       | (0xb << 8);
    env->pvr.regs[2] = PVR2_D_OPB_MASK \
                       | PVR2_D_LMB_MASK \
                       | PVR2_I_OPB_MASK \
                       | PVR2_I_LMB_MASK \
                       | PVR2_USE_MSR_INSTR \
                       | PVR2_USE_PCMP_INSTR \
                       | PVR2_USE_BARREL_MASK \
                       | PVR2_USE_DIV_MASK \
                       | PVR2_USE_HW_MUL_MASK \
                       | PVR2_USE_MUL64_MASK \
                       | PVR2_USE_FPU_MASK \
                       | PVR2_USE_FPU2_MASK \
                       | PVR2_FPU_EXC_MASK \
                       | 0;
    env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family.  */
    env->pvr.regs[11] = PVR11_USE_MMU | (16 << 17);

#if defined(CONFIG_USER_ONLY)
    /* start in user mode with interrupts enabled.  */
    env->sregs[SR_MSR] = MSR_EE | MSR_IE | MSR_VM | MSR_UM;
    env->pvr.regs[10] = 0x0c000000; /* Spartan 3a dsp.  */
#else
    env->sregs[SR_MSR] = 0;
    mmu_init(&env->mmu);
    env->mmu.c_mmu = 3;
    env->mmu.c_mmu_tlb_access = 3;
    env->mmu.c_mmu_zones = 16;
#endif
}

void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->sregs[SR_PC] = gen_opc_pc[pc_pos];
}