]> git.proxmox.com Git - mirror_qemu.git/blob - target-m68k/translate.c
target-m68k: add addressing modes to scc
[mirror_qemu.git] / target-m68k / translate.c
1 /*
2 * m68k translation
3 *
4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "exec/cpu_ldst.h"
28
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31
32 #include "trace-tcg.h"
33 #include "exec/log.h"
34
35
//#define DEBUG_DISPATCH 1

/* Fake floating point.  Doubles are handled as plain 64-bit integer
   moves/loads/stores; no FP arithmetic is done through these macros. */
#define tcg_gen_mov_f64 tcg_gen_mov_i64
#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
42
/* Declare one TCG global (QREG_<name>) per entry in qregs.def.
   The same file is re-included in m68k_tcg_init() with different
   macro definitions to actually create the globals. */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.def"
#undef DEFO32
#undef DEFO64
#undef DEFF64
50
/* TCG views of CPUState fields (addressed at negative offsets from env). */
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

static TCGv_env cpu_env;

/* Backing store for the "D0".."F7" / "ACC0".."ACC3" debug names:
   24 three-byte names plus 4 five-byte names. */
static char cpu_reg_names[3*8*3 + 5*4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_fregs[8];
static TCGv_i64 cpu_macc[4];

/* Extract a 3-bit register field from an instruction word and map it
   to the corresponding TCG global. */
#define REG(insn, pos) (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
#define AREG(insn, pos) cpu_aregs[REG(insn, pos)]
#define FREG(insn, pos) cpu_fregs[REG(insn, pos)]
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP cpu_aregs[7]

/* Sentinel returned by gen_lea()/gen_ea() for invalid addressing modes. */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
/* Used to distinguish stores from bad addressing modes. */
static TCGv store_dummy;
73
74 #include "exec/gen-icount.h"
75
76 void m68k_tcg_init(void)
77 {
78 char *p;
79 int i;
80
81 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
82 tcg_ctx.tcg_env = cpu_env;
83
84 #define DEFO32(name, offset) \
85 QREG_##name = tcg_global_mem_new_i32(cpu_env, \
86 offsetof(CPUM68KState, offset), #name);
87 #define DEFO64(name, offset) \
88 QREG_##name = tcg_global_mem_new_i64(cpu_env, \
89 offsetof(CPUM68KState, offset), #name);
90 #define DEFF64(name, offset) DEFO64(name, offset)
91 #include "qregs.def"
92 #undef DEFO32
93 #undef DEFO64
94 #undef DEFF64
95
96 cpu_halted = tcg_global_mem_new_i32(cpu_env,
97 -offsetof(M68kCPU, env) +
98 offsetof(CPUState, halted), "HALTED");
99 cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
100 -offsetof(M68kCPU, env) +
101 offsetof(CPUState, exception_index),
102 "EXCEPTION");
103
104 p = cpu_reg_names;
105 for (i = 0; i < 8; i++) {
106 sprintf(p, "D%d", i);
107 cpu_dregs[i] = tcg_global_mem_new(cpu_env,
108 offsetof(CPUM68KState, dregs[i]), p);
109 p += 3;
110 sprintf(p, "A%d", i);
111 cpu_aregs[i] = tcg_global_mem_new(cpu_env,
112 offsetof(CPUM68KState, aregs[i]), p);
113 p += 3;
114 sprintf(p, "F%d", i);
115 cpu_fregs[i] = tcg_global_mem_new_i64(cpu_env,
116 offsetof(CPUM68KState, fregs[i]), p);
117 p += 3;
118 }
119 for (i = 0; i < 4; i++) {
120 sprintf(p, "ACC%d", i);
121 cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
122 offsetof(CPUM68KState, macc[i]), p);
123 p += 5;
124 }
125
126 NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
127 store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
128 }
129
/* internal defines */
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    CPUM68KState *env;
    target_ulong insn_pc; /* Start of the current instruction. */
    target_ulong pc;      /* Next fetch address (advanced by read_im*). */
    int is_jmp;           /* DISAS_* termination status for the TB. */
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;     /* Nonzero if QREG_CC_OP matches cc_op. */
    int user;             /* Nonzero when translating user-mode code. */
    uint32_t fpcr;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    TCGv_i64 mactmp;      /* Lazily allocated scratch for MAC insns. */
    int done_mac;
} DisasContext;
145
#define DISAS_JUMP_NEXT 4

/* Memory access index: user-only builds always use the user index. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) s->user
#endif

/* XXX: move that elsewhere */
/* ??? Fix exceptions. */
/* Set after emitting any qemu_ld/st that may fault; currently only a
   marker (gen_last_qop is NULL), kept for future exception handling. */
static void *gen_throws_exception;
#define gen_last_qop NULL
158
/* Signature of every instruction translator in the dispatch table. */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

/* Define an instruction translator; with DEBUG_DISPATCH each handler is
   wrapped so its name is logged before the real body runs. */
#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif
178
/* For each lazy CC_OP, the set of flag bits whose QREG_CC_* values are
   live (meaningful) in that state; used by set_cc_op() to discard the
   rest. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADD] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUB] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMP] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
186
187 static void set_cc_op(DisasContext *s, CCOp op)
188 {
189 CCOp old_op = s->cc_op;
190 int dead;
191
192 if (old_op == op) {
193 return;
194 }
195 s->cc_op = op;
196 s->cc_op_synced = 0;
197
198 /* Discard CC computation that will no longer be used.
199 Note that X and N are never dead. */
200 dead = cc_op_live[old_op] & ~cc_op_live[op];
201 if (dead & CCF_C) {
202 tcg_gen_discard_i32(QREG_CC_C);
203 }
204 if (dead & CCF_Z) {
205 tcg_gen_discard_i32(QREG_CC_Z);
206 }
207 if (dead & CCF_V) {
208 tcg_gen_discard_i32(QREG_CC_V);
209 }
210 }
211
212 /* Update the CPU env CC_OP state. */
213 static void update_cc_op(DisasContext *s)
214 {
215 if (!s->cc_op_synced) {
216 s->cc_op_synced = 1;
217 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
218 }
219 }
220
221 /* Generate a load from the specified address. Narrow values are
222 sign extended to full register width. */
223 static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
224 {
225 TCGv tmp;
226 int index = IS_USER(s);
227 tmp = tcg_temp_new_i32();
228 switch(opsize) {
229 case OS_BYTE:
230 if (sign)
231 tcg_gen_qemu_ld8s(tmp, addr, index);
232 else
233 tcg_gen_qemu_ld8u(tmp, addr, index);
234 break;
235 case OS_WORD:
236 if (sign)
237 tcg_gen_qemu_ld16s(tmp, addr, index);
238 else
239 tcg_gen_qemu_ld16u(tmp, addr, index);
240 break;
241 case OS_LONG:
242 case OS_SINGLE:
243 tcg_gen_qemu_ld32u(tmp, addr, index);
244 break;
245 default:
246 g_assert_not_reached();
247 }
248 gen_throws_exception = gen_last_qop;
249 return tmp;
250 }
251
252 static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
253 {
254 TCGv_i64 tmp;
255 int index = IS_USER(s);
256 tmp = tcg_temp_new_i64();
257 tcg_gen_qemu_ldf64(tmp, addr, index);
258 gen_throws_exception = gen_last_qop;
259 return tmp;
260 }
261
262 /* Generate a store. */
263 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
264 {
265 int index = IS_USER(s);
266 switch(opsize) {
267 case OS_BYTE:
268 tcg_gen_qemu_st8(val, addr, index);
269 break;
270 case OS_WORD:
271 tcg_gen_qemu_st16(val, addr, index);
272 break;
273 case OS_LONG:
274 case OS_SINGLE:
275 tcg_gen_qemu_st32(val, addr, index);
276 break;
277 default:
278 g_assert_not_reached();
279 }
280 gen_throws_exception = gen_last_qop;
281 }
282
283 static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
284 {
285 int index = IS_USER(s);
286 tcg_gen_qemu_stf64(val, addr, index);
287 gen_throws_exception = gen_last_qop;
288 }
289
/* Direction/extension selector for gen_ldst()/gen_ea(). */
typedef enum {
    EA_STORE,   /* write VAL to the effective address */
    EA_LOADU,   /* zero-extending load */
    EA_LOADS    /* sign-extending load */
} ea_what;
295
/* Generate a load or a store at ADDR depending on WHAT: EA_STORE writes
   VAL and returns store_dummy; EA_LOADU/EA_LOADS return a zero-/sign-
   extending load.  (VAL is only used for stores.) */
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
                     ea_what what)
{
    if (what == EA_STORE) {
        gen_store(s, opsize, addr, val);
        return store_dummy;
    } else {
        return gen_load(s, opsize, addr, what == EA_LOADS);
    }
}
308
309 /* Read a 16-bit immediate constant */
310 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
311 {
312 uint16_t im;
313 im = cpu_lduw_code(env, s->pc);
314 s->pc += 2;
315 return im;
316 }
317
/* Read an 8-bit immediate constant.  On m68k a byte immediate still
   occupies a full 16-bit extension word; the value is in the low byte,
   which the uint8_t return type extracts by truncation. */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
{
    return read_im16(env, s);
}
323
324 /* Read a 32-bit immediate constant. */
325 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
326 {
327 uint32_t im;
328 im = read_im16(env, s) << 16;
329 im |= 0xffff & read_im16(env, s);
330 return im;
331 }
332
/* Calculate an address index from extension word EXT.  TMP is caller-
   provided scratch; the returned value may be the index register itself,
   or TMP after sign-extension and/or scaling has been applied. */
static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    /* Bit 15 selects An vs Dn as the index register. */
    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        /* Word-sized index: sign-extend into the scratch. */
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        /* Scale factor 2/4/8 encoded as a shift count. */
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
351
/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  Supports both the brief and
   (on CPUs with M68K_FEATURE_EXT_FULL) the full extension word format,
   including base/index suppression and memory indirection.  Returns
   NULL_QREG for modes the current CPU does not implement. */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* PC value used for pc-relative modes: address of the extension word. */
    offset = s->pc;
    ext = read_im16(env, s);

    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* CPUs without scaled indexing ignore the scale field. */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(ext, tmp);
        } else {
            /* Index suppressed or applied post-indirection. */
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold the displacement into the constant. */
                base = tcg_const_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            add = tcg_const_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0);
            if ((ext & 0x44) == 4) {
                /* post-index: add the index after the indirection. */
                add = gen_addr_index(ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format */
        tmp = tcg_temp_new();
        add = gen_addr_index(ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            /* pc-relative with 8-bit displacement. */
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
456
/* Evaluate all the CC flags.  Converts the current lazy CC state into
   concrete values in QREG_CC_{C,V,Z,N,X} and switches to CC_OP_FLAGS. */

static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already materialized. */
        return;

    case CC_OP_ADD:
        /* N holds the result, V holds the source operand, X the carry. */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        tcg_temp_free(t1);
        break;

    case CC_OP_SUB:
        /* N holds the result, V holds the subtrahend, X the borrow. */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        tcg_temp_free(t1);
        break;

    case CC_OP_CMP:
        /* N holds the first operand, V the second; the subtraction is
           performed here rather than at compare time. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* Logic ops leave the result in N; C and V are always clear. */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* CC_OP chosen at runtime; defer to the helper. */
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        break;

    default:
        t0 = tcg_const_i32(s->cc_op);
        gen_helper_flush_flags(cpu_env, t0);
        tcg_temp_free(t0);
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op. */
    s->cc_op = CC_OP_FLAGS;
    s->cc_op_synced = 1;
}
528
529 /* Sign or zero extend a value. */
530
531 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
532 {
533 switch (opsize) {
534 case OS_BYTE:
535 if (sign) {
536 tcg_gen_ext8s_i32(res, val);
537 } else {
538 tcg_gen_ext8u_i32(res, val);
539 }
540 break;
541 case OS_WORD:
542 if (sign) {
543 tcg_gen_ext16s_i32(res, val);
544 } else {
545 tcg_gen_ext16u_i32(res, val);
546 }
547 break;
548 case OS_LONG:
549 tcg_gen_mov_i32(res, val);
550 break;
551 default:
552 g_assert_not_reached();
553 }
554 }
555
556 static TCGv gen_extend(TCGv val, int opsize, int sign)
557 {
558 TCGv tmp;
559
560 if (opsize == OS_LONG) {
561 tmp = val;
562 } else {
563 tmp = tcg_temp_new();
564 gen_ext(tmp, val, opsize, sign);
565 }
566
567 return tmp;
568 }
569
/* Set the condition codes for a logic-class result: the sign-extended
   result goes into N and the lazy state becomes CC_OP_LOGIC. */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
575
/* Record the operands of an add/sub for lazy flag evaluation:
   N holds the result, V holds the second operand. */
static void gen_update_cc_add(TCGv dest, TCGv src)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
581
582 static inline int opsize_bytes(int opsize)
583 {
584 switch (opsize) {
585 case OS_BYTE: return 1;
586 case OS_WORD: return 2;
587 case OS_LONG: return 4;
588 case OS_SINGLE: return 4;
589 case OS_DOUBLE: return 8;
590 case OS_EXTENDED: return 12;
591 case OS_PACKED: return 12;
592 default:
593 g_assert_not_reached();
594 }
595 }
596
597 static inline int insn_opsize(int insn)
598 {
599 switch ((insn >> 6) & 3) {
600 case 0: return OS_BYTE;
601 case 1: return OS_WORD;
602 case 2: return OS_LONG;
603 default:
604 g_assert_not_reached();
605 }
606 }
607
/* Assign value to a register.  If the width is less than the register width
   only the low part of the register is set.  Note the register is masked
   before VAL is extended, which matters if VAL aliases REG. */
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
{
    TCGv tmp;
    switch (opsize) {
    case OS_BYTE:
        tcg_gen_andi_i32(reg, reg, 0xffffff00);
        tmp = tcg_temp_new();
        tcg_gen_ext8u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        break;
    case OS_WORD:
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
        tmp = tcg_temp_new();
        tcg_gen_ext16u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_mov_i32(reg, val);
        break;
    default:
        g_assert_not_reached();
    }
}
634
/* Generate code for an "effective address".  Does not adjust the base
   register for autoincrement addressing modes.  Returns NULL_QREG for
   modes that have no address (register direct, immediate) or that the
   CPU does not support. */
static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                    int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct. */
    case 1: /* Address register direct. */
        return NULL_QREG;
    case 2: /* Indirect register */
    case 3: /* Indirect postincrement. */
        /* Postincrement itself is applied by the caller (gen_ea). */
        return AREG(insn, 0);
    case 4: /* Indirect predecrememnt. */
        /* Computes An - size; An is updated by the caller. */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        return tmp;
    case 5: /* Indirect displacement. */
        reg = AREG(insn, 0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement. */
        reg = AREG(insn, 0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short. */
            offset = (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 1: /* Absolute long. */
            offset = read_im32(env, s);
            return tcg_const_i32(offset);
        case 2: /* pc displacement  */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
688
689 /* Helper function for gen_ea. Reuse the computed address between the
690 for read/write operands. */
691 static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
692 uint16_t insn, int opsize, TCGv val,
693 TCGv *addrp, ea_what what)
694 {
695 TCGv tmp;
696
697 if (addrp && what == EA_STORE) {
698 tmp = *addrp;
699 } else {
700 tmp = gen_lea(env, s, insn, opsize);
701 if (IS_NULL_QREG(tmp))
702 return tmp;
703 if (addrp)
704 *addrp = tmp;
705 }
706 return gen_ldst(s, opsize, tmp, val, what);
707 }
708
/* Generate code to load/store a value from/into an EA.  WHAT selects a
   zero-extending load (EA_LOADU), a sign-extending load (EA_LOADS) or a
   store of VAL (EA_STORE).  ADDRP is non-null for read-modify-write
   operands so the address is only computed once.  Returns NULL_QREG on
   an invalid addressing mode. */
static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                   int opsize, TCGv val, TCGv *addrp, ea_what what)
{
    TCGv reg;
    TCGv result;
    uint32_t offset;

    switch ((insn >> 3) & 7) {
    case 0: /* Data register direct. */
        reg = DREG(insn, 0);
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct. */
        reg = AREG(insn, 0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = AREG(insn, 0);
        return gen_ldst(s, opsize, reg, val, what);
    case 3: /* Indirect postincrement. */
        reg = AREG(insn, 0);
        result = gen_ldst(s, opsize, reg, val, what);
        /* ??? This is not exception safe.  The instruction may still
           fault after this point.  */
        /* For RMW operands only the final store advances the register. */
        if (what == EA_STORE || !addrp)
            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
        return result;
    case 4: /* Indirect predecrememnt. */
        {
            TCGv tmp;
            if (addrp && what == EA_STORE) {
                tmp = *addrp;
            } else {
                tmp = gen_lea(env, s, insn, opsize);
                if (IS_NULL_QREG(tmp))
                    return tmp;
                if (addrp)
                    *addrp = tmp;
            }
            result = gen_ldst(s, opsize, tmp, val, what);
            /* ??? This is not exception safe.  The instruction may still
               fault after this point.  */
            if (what == EA_STORE || !addrp) {
                reg = AREG(insn, 0);
                tcg_gen_mov_i32(reg, tmp);
            }
        }
        return result;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
    case 7: /* Other */
        switch (insn & 7) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
        case 4: /* Immediate. */
            /* Sign extend values for consistency. */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_const_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
809
/* A comparison (v1 TCOND v2) equivalent to an m68k condition code.
   g1/g2 flag values that are TCG globals and must not be freed. */
typedef struct {
    TCGCond tcond;
    bool g1;
    bool g2;
    TCGv v1;
    TCGv v2;
} DisasCompare;
817
/* Translate m68k condition COND into a DisasCompare, exploiting the
   current lazy CC state to avoid a full flag flush where possible.
   Even/odd condition pairs are handled together; the inversion for the
   even member is applied at the "done" label. */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMP) {
        /* N and V still hold the two original compare operands. */
        c->g1 = c->g2 = 1;
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* Sign of the difference, so compute it explicitly. */
            c->g1 = c->g2 = 0;
            c->v2 = tcg_const_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->g1 = 1;
    c->g2 = 0;
    c->v2 = tcg_const_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* Logic operations clear V, which simplifies LE to (Z || N),
           and since Z and N are co-located, this becomes a normal
           comparison vs N.  */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADD || op == CC_OP_SUB || op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADD || op == CC_OP_SUB || op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADD || op == CC_OP_SUB) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* V is kept in the sign bit after a flush. */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* Fold Z (as a -1/0 mask) into the sign-bit test of N ^ V. */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcg_temp_free(tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition numbers are the negation of their odd partner. */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
990
/* Release the temporaries held by a DisasCompare; globals (g1/g2 set)
   are left alone. */
static void free_cond(DisasCompare *c)
{
    if (!c->g1) {
        tcg_temp_free(c->v1);
    }
    if (!c->g2) {
        tcg_temp_free(c->v2);
    }
}
1000
1001 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1002 {
1003 DisasCompare c;
1004
1005 gen_cc_cond(&c, s, cond);
1006 update_cc_op(s);
1007 tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1008 free_cond(&c);
1009 }
1010
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_lookup_tb(DisasContext *s)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;
}
1018
/* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}
1026
/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}
1034
1035 static void gen_exception(DisasContext *s, uint32_t where, int nr)
1036 {
1037 update_cc_op(s);
1038 gen_jmp_im(s, where);
1039 gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
1040 }
1041
/* Raise an address error for the current instruction (invalid EA). */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
}
1046
/* Load the source operand of the current instruction into RESULT,
   bailing out of the enclosing translator with an address fault on an
   invalid mode.  Implicitly uses the local variables `insn` and `s`. */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU);                 \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)

/* Store VAL to the destination operand, with the same fault-and-return
   behaviour as SRC_EA.  Implicitly uses the local variable `s`. */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1063
/* Direct TB chaining is only valid when the target stays on the same
   guest page as the TB start or the current instruction; user-mode
   emulation has no such restriction. */
static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
1073
/* Generate a jump to an immediate address, chaining TBs via goto_tb
   slot N when allowed; under single-stepping a debug exception is
   raised instead. */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        gen_exception(s, dest, EXCP_DEBUG);
    } else if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb + n);
    } else {
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}
1089
/* Scc: set a byte destination to all-ones if the condition holds,
   all-zeroes otherwise; supports any data-alterable addressing mode. */
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
    free_cond(&c);

    /* setcond produces 0/1; negate to the 0x00/0xff Scc requires. */
    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
    tcg_temp_free(tmp);
}
1107
/* Unimplemented MAC instruction: raise a line-A emulator trap. */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->pc - 2, EXCP_LINEA);
}
1112
/* Unimplemented FPU instruction: raise a line-F emulator trap. */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->pc - 2, EXCP_LINEF);
}
1117
/* Truly unknown opcode: raise the unsupported-instruction exception and
   abort the emulator (this path indicates a decoder hole). */
DISAS_INSN(undef)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
    cpu_abort(CPU(cpu), "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
}
1125
1126 DISAS_INSN(mulw)
1127 {
1128 TCGv reg;
1129 TCGv tmp;
1130 TCGv src;
1131 int sign;
1132
1133 sign = (insn & 0x100) != 0;
1134 reg = DREG(insn, 9);
1135 tmp = tcg_temp_new();
1136 if (sign)
1137 tcg_gen_ext16s_i32(tmp, reg);
1138 else
1139 tcg_gen_ext16u_i32(tmp, reg);
1140 SRC_EA(env, src, OS_WORD, sign, NULL);
1141 tcg_gen_mul_i32(tmp, tmp, src);
1142 tcg_gen_mov_i32(reg, tmp);
1143 gen_logic_cc(s, tmp, OS_WORD);
1144 }
1145
1146 DISAS_INSN(divw)
1147 {
1148 TCGv reg;
1149 TCGv tmp;
1150 TCGv src;
1151 int sign;
1152
1153 sign = (insn & 0x100) != 0;
1154 reg = DREG(insn, 9);
1155 if (sign) {
1156 tcg_gen_ext16s_i32(QREG_DIV1, reg);
1157 } else {
1158 tcg_gen_ext16u_i32(QREG_DIV1, reg);
1159 }
1160 SRC_EA(env, src, OS_WORD, sign, NULL);
1161 tcg_gen_mov_i32(QREG_DIV2, src);
1162 if (sign) {
1163 gen_helper_divs(cpu_env, tcg_const_i32(1));
1164 } else {
1165 gen_helper_divu(cpu_env, tcg_const_i32(1));
1166 }
1167
1168 tmp = tcg_temp_new();
1169 src = tcg_temp_new();
1170 tcg_gen_ext16u_i32(tmp, QREG_DIV1);
1171 tcg_gen_shli_i32(src, QREG_DIV2, 16);
1172 tcg_gen_or_i32(reg, tmp, src);
1173
1174 set_cc_op(s, CC_OP_FLAGS);
1175 }
1176
1177 DISAS_INSN(divl)
1178 {
1179 TCGv num;
1180 TCGv den;
1181 TCGv reg;
1182 uint16_t ext;
1183
1184 ext = read_im16(env, s);
1185 if (ext & 0x87f8) {
1186 gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
1187 return;
1188 }
1189 num = DREG(ext, 12);
1190 reg = DREG(ext, 0);
1191 tcg_gen_mov_i32(QREG_DIV1, num);
1192 SRC_EA(env, den, OS_LONG, 0, NULL);
1193 tcg_gen_mov_i32(QREG_DIV2, den);
1194 if (ext & 0x0800) {
1195 gen_helper_divs(cpu_env, tcg_const_i32(0));
1196 } else {
1197 gen_helper_divu(cpu_env, tcg_const_i32(0));
1198 }
1199 if ((ext & 7) == ((ext >> 12) & 7)) {
1200 /* div */
1201 tcg_gen_mov_i32 (reg, QREG_DIV1);
1202 } else {
1203 /* rem */
1204 tcg_gen_mov_i32 (reg, QREG_DIV2);
1205 }
1206 set_cc_op(s, CC_OP_FLAGS);
1207 }
1208
1209 DISAS_INSN(addsub)
1210 {
1211 TCGv reg;
1212 TCGv dest;
1213 TCGv src;
1214 TCGv tmp;
1215 TCGv addr;
1216 int add;
1217
1218 add = (insn & 0x4000) != 0;
1219 reg = DREG(insn, 9);
1220 dest = tcg_temp_new();
1221 if (insn & 0x100) {
1222 SRC_EA(env, tmp, OS_LONG, 0, &addr);
1223 src = reg;
1224 } else {
1225 tmp = reg;
1226 SRC_EA(env, src, OS_LONG, 0, NULL);
1227 }
1228 if (add) {
1229 tcg_gen_add_i32(dest, tmp, src);
1230 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
1231 set_cc_op(s, CC_OP_ADD);
1232 } else {
1233 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
1234 tcg_gen_sub_i32(dest, tmp, src);
1235 set_cc_op(s, CC_OP_SUB);
1236 }
1237 gen_update_cc_add(dest, src);
1238 if (insn & 0x100) {
1239 DEST_EA(env, insn, OS_LONG, dest, &addr);
1240 } else {
1241 tcg_gen_mov_i32(reg, dest);
1242 }
1243 }
1244
1245
/* Reverse the order of the bits in REG (ColdFire BITREV). */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}
1253
/*
 * btst/bchg/bclr/bset with the bit number in a data register.
 * Memory operands are byte-sized (bit number taken mod 8); register
 * operands are long (mod 32).  The Z flag receives the tested bit
 * (QREG_CC_Z == 0 means the bit was clear, i.e. Z set).
 */
DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;

    gen_flush_flags(s);

    /* Only the modifying ops (op != 0) need the address for writeback.  */
    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
    src2 = DREG(insn, 9);
    dest = tcg_temp_new();

    tmp = tcg_temp_new();
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(tmp, src2, 7);
    else
        tcg_gen_andi_i32(tmp, src2, 31);

    /* src2 is re-purposed as the bit mask from here on.  */
    src2 = tcg_const_i32(1);
    tcg_gen_shl_i32(src2, src2, tmp);
    tcg_temp_free(tmp);

    tcg_gen_and_i32(QREG_CC_Z, src1, src2);

    switch (op) {
    case 1: /* bchg */
        tcg_gen_xor_i32(dest, src1, src2);
        break;
    case 2: /* bclr */
        tcg_gen_andc_i32(dest, src1, src2);
        break;
    case 3: /* bset */
        tcg_gen_or_i32(dest, src1, src2);
        break;
    default: /* btst */
        break;
    }
    tcg_temp_free(src2);
    if (op) {
        DEST_EA(env, insn, opsize, dest, &addr);
    }
    tcg_temp_free(dest);
}
1307
DISAS_INSN(sats)
{
    TCGv reg;
    /* ColdFire sats: saturate Dn on signed overflow (helper consults
       the V flag, hence the flush first), then set NZ from the result.  */
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
1316
1317 static void gen_push(DisasContext *s, TCGv val)
1318 {
1319 TCGv tmp;
1320
1321 tmp = tcg_temp_new();
1322 tcg_gen_subi_i32(tmp, QREG_SP, 4);
1323 gen_store(s, OS_LONG, tmp, val);
1324 tcg_gen_mov_i32(QREG_SP, tmp);
1325 }
1326
/*
 * movem.l: transfer the registers selected by the mask word between
 * memory and D0-D7/A0-A7 (bit 0 = D0 ... bit 15 = A7), in ascending
 * register order from the effective address.
 */
DISAS_INSN(movem)
{
    TCGv addr;
    int i;
    uint16_t mask;
    TCGv reg;
    TCGv tmp;
    int is_load;

    mask = read_im16(env, s);
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    is_load = ((insn & 0x0400) != 0);
    for (i = 0; i < 16; i++, mask >>= 1) {
        if (mask & 1) {
            if (i < 8)
                reg = DREG(i, 0);
            else
                reg = AREG(i, 0);
            if (is_load) {
                tmp = gen_load(s, OS_LONG, addr, 0);
                tcg_gen_mov_i32(reg, tmp);
            } else {
                gen_store(s, OS_LONG, addr, reg);
            }
            /* Skip the address bump after the final transfer.  */
            if (mask != 1)
                tcg_gen_addi_i32(addr, addr, 4);
        }
    }
}
1362
/*
 * btst/bchg/bclr/bset with an immediate bit number (second opcode word).
 * A nonzero high byte in the immediate is an undefined instruction.
 * Memory operands use bit numbers mod 8, register operands mod 32;
 * QREG_CC_Z receives the tested bit (0 == Z set).
 */
DISAS_INSN(bitop_im)
{
    int opsize;
    int op;
    TCGv src1;
    uint32_t mask;
    int bitnum;
    TCGv tmp;
    TCGv addr;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;

    bitnum = read_im16(env, s);
    if (bitnum & 0xff00) {
        disas_undef(env, s, insn);
        return;
    }

    gen_flush_flags(s);

    /* Only the modifying ops (op != 0) need the address for writeback.  */
    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    if (opsize == OS_BYTE)
        bitnum &= 7;
    else
        bitnum &= 31;
    mask = 1 << bitnum;

    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);

    if (op) {
        tmp = tcg_temp_new();
        switch (op) {
        case 1: /* bchg */
            tcg_gen_xori_i32(tmp, src1, mask);
            break;
        case 2: /* bclr */
            tcg_gen_andi_i32(tmp, src1, ~mask);
            break;
        case 3: /* bset */
            tcg_gen_ori_i32(tmp, src1, mask);
            break;
        default: /* btst */
            break;
        }
        DEST_EA(env, insn, opsize, tmp, &addr);
        tcg_temp_free(tmp);
    }
}
1416
/*
 * Long-word immediate arithmetic/logic: ori/andi/subi/addi/eori/cmpi
 * #imm,<ea>, selected by opcode bits 9-11.  cmpi (op 6) does not write
 * back; all others store the result to the effective address.
 */
DISAS_INSN(arith_im)
{
    int op;
    uint32_t im;
    TCGv src1;
    TCGv dest;
    TCGv addr;

    op = (insn >> 9) & 7;
    SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
    im = read_im32(env, s);
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_ori_i32(dest, src1, im);
        gen_logic_cc(s, dest, OS_LONG);
        break;
    case 1: /* andi */
        tcg_gen_andi_i32(dest, src1, im);
        gen_logic_cc(s, dest, OS_LONG);
        break;
    case 2: /* subi */
        tcg_gen_mov_i32(dest, src1);
        /* X = borrow: computed before the subtract clobbers dest.  */
        tcg_gen_setcondi_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        tcg_gen_subi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        set_cc_op(s, CC_OP_SUB);
        break;
    case 3: /* addi */
        tcg_gen_mov_i32(dest, src1);
        tcg_gen_addi_i32(dest, dest, im);
        gen_update_cc_add(dest, tcg_const_i32(im));
        /* X = carry: unsigned sum wrapped below the immediate.  */
        tcg_gen_setcondi_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADD);
        break;
    case 5: /* eori */
        tcg_gen_xori_i32(dest, src1, im);
        gen_logic_cc(s, dest, OS_LONG);
        break;
    case 6: /* cmpi */
        gen_update_cc_add(src1, tcg_const_i32(im));
        set_cc_op(s, CC_OP_CMP);
        break;
    default:
        abort();
    }
    if (op != 6) {
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    }
}
1467
DISAS_INSN(byterev)
{
    TCGv reg;

    /* ColdFire byterev: reverse the byte order of Dn; no flags.  */
    reg = DREG(insn, 0);
    tcg_gen_bswap32_i32(reg, reg);
}
1475
/*
 * move.b/w/l <ea>,<ea> and movea.  The size comes from opcode bits
 * 12-13 (1 = byte, 2 = long, 3 = word).  movea (address-register
 * destination) does not affect the condition codes.
 */
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended. */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        /* Destination mode/register fields are swapped in the opcode
           relative to a source EA, so rebuild a source-style EA word.  */
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend. */
        gen_logic_cc(s, src, opsize);
    }
}
1512
DISAS_INSN(negx)
{
    TCGv reg;

    /* negx Dn: Dn = 0 - Dn - X; helper needs the current flags.  */
    gen_flush_flags(s);
    reg = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);
}
1521
DISAS_INSN(lea)
{
    TCGv reg;
    TCGv tmp;

    /* lea <ea>,An: load the effective address itself into An.  */
    reg = AREG(insn, 9);
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        /* EA mode has no address (e.g. register direct).  */
        gen_addr_fault(s);
        return;
    }
    tcg_gen_mov_i32(reg, tmp);
}
1535
DISAS_INSN(clr)
{
    int opsize;

    /* clr <ea>: store zero and set the flags for a zero result.  */
    opsize = insn_opsize(insn);
    DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
    gen_logic_cc(s, tcg_const_i32(0), opsize);
}
1544
/* Return a fresh temporary holding the CCR, materialized from the
   lazily-evaluated flag state by the get_ccr helper.  */
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    gen_flush_flags(s);
    update_cc_op(s);
    dest = tcg_temp_new();
    gen_helper_get_ccr(dest, cpu_env);
    return dest;
}
1555
DISAS_INSN(move_from_ccr)
{
    TCGv ccr;

    /* move ccr,<ea>: stored as a word.  */
    ccr = gen_get_ccr(s);
    DEST_EA(env, insn, OS_WORD, ccr, NULL);
}
1563
DISAS_INSN(neg)
{
    TCGv reg;
    TCGv src1;

    /* neg.l Dn: Dn = 0 - Dn; X is set iff the operand was nonzero.  */
    reg = DREG(insn, 0);
    src1 = tcg_temp_new();
    tcg_gen_mov_i32(src1, reg);
    tcg_gen_neg_i32(reg, src1);
    gen_update_cc_add(reg, src1);
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, src1, 0);
    set_cc_op(s, CC_OP_SUB);
}
1577
/*
 * Load an immediate value into the SR or (ccr_only) just the CCR.
 * Note the internal flag representation: C and X are 0/1, V and N are
 * stored sign-style (-1/0), and QREG_CC_Z is *inverted* — it holds a
 * value that is zero iff the Z flag is set.
 */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        gen_helper_set_sr(cpu_env, tcg_const_i32(val));
    }
    set_cc_op(s, CC_OP_FLAGS);
}
1591
/*
 * Common move-to-SR/CCR decoding: supports only the data-register
 * direct and immediate addressing modes; anything else is undefined.
 */
static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                       int ccr_only)
{
    if ((insn & 0x38) == 0) {
        /* Dn source.  */
        if (ccr_only) {
            gen_helper_set_ccr(cpu_env, DREG(insn, 0));
        } else {
            gen_helper_set_sr(cpu_env, DREG(insn, 0));
        }
        set_cc_op(s, CC_OP_FLAGS);
    } else if ((insn & 0x3f) == 0x3c) {
        /* #imm source.  */
        uint16_t val;
        val = read_im16(env, s);
        gen_set_sr_im(s, val, ccr_only);
    } else {
        disas_undef(env, s, insn);
    }
}
1610
1611
DISAS_INSN(move_to_ccr)
{
    /* move <ea>,ccr — unprivileged; only the CCR byte is affected.  */
    gen_set_sr(env, s, insn, 1);
}
1616
DISAS_INSN(not)
{
    TCGv reg;

    /* not.l Dn: bitwise complement, NZ set from the result.  */
    reg = DREG(insn, 0);
    tcg_gen_not_i32(reg, reg);
    gen_logic_cc(s, reg, OS_LONG);
}
1625
1626 DISAS_INSN(swap)
1627 {
1628 TCGv src1;
1629 TCGv src2;
1630 TCGv reg;
1631
1632 src1 = tcg_temp_new();
1633 src2 = tcg_temp_new();
1634 reg = DREG(insn, 0);
1635 tcg_gen_shli_i32(src1, reg, 16);
1636 tcg_gen_shri_i32(src2, reg, 16);
1637 tcg_gen_or_i32(reg, src1, src2);
1638 gen_logic_cc(s, reg, OS_LONG);
1639 }
1640
DISAS_INSN(bkpt)
{
    /* bkpt: raise a debug exception at the instruction's address.  */
    gen_exception(s, s->pc - 2, EXCP_DEBUG);
}
1645
DISAS_INSN(pea)
{
    TCGv tmp;

    /* pea <ea>: push the effective address (not its contents).  */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    gen_push(s, tmp);
}
1657
1658 DISAS_INSN(ext)
1659 {
1660 int op;
1661 TCGv reg;
1662 TCGv tmp;
1663
1664 reg = DREG(insn, 0);
1665 op = (insn >> 6) & 7;
1666 tmp = tcg_temp_new();
1667 if (op == 3)
1668 tcg_gen_ext16s_i32(tmp, reg);
1669 else
1670 tcg_gen_ext8s_i32(tmp, reg);
1671 if (op == 2)
1672 gen_partset_reg(OS_WORD, reg, tmp);
1673 else
1674 tcg_gen_mov_i32(reg, tmp);
1675 gen_logic_cc(s, tmp, OS_LONG);
1676 }
1677
1678 DISAS_INSN(tst)
1679 {
1680 int opsize;
1681 TCGv tmp;
1682
1683 opsize = insn_opsize(insn);
1684 SRC_EA(env, tmp, opsize, 1, NULL);
1685 gen_logic_cc(s, tmp, opsize);
1686 }
1687
DISAS_INSN(pulse)
{
    /* Implemented as a NOP. */
}
1692
DISAS_INSN(illegal)
{
    /* Raise the illegal-instruction exception at this insn's address.  */
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}
1697
/* ??? This should be atomic. */
DISAS_INSN(tas)
{
    TCGv dest;
    TCGv src1;
    TCGv addr;

    /* tas <ea>: test the byte, set NZ, then set its top bit.  The
       read-modify-write is not performed atomically here.  */
    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1, OS_BYTE);
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
}
1711
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv reg;
    TCGv src1;
    TCGv dest;

    /* The upper 32 bits of the product are discarded, so
       muls.l and mulu.l are functionally equivalent. */
    ext = read_im16(env, s);
    if (ext & 0x87ff) {
        /* 64-bit result and other unsupported forms.  */
        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
        return;
    }
    reg = DREG(ext, 12);
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    dest = tcg_temp_new();
    tcg_gen_mul_i32(dest, src1, reg);
    tcg_gen_mov_i32(reg, dest);
    /* Unlike m68k, coldfire always clears the overflow bit. */
    gen_logic_cc(s, dest, OS_LONG);
}
1734
/*
 * Common body of link/linkl: push An, set An to the new frame pointer,
 * then SP = frame + offset.  When An is A7 the register copy is
 * skipped so SP ends up solely at frame + offset.
 */
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg);
    if ((insn & 7) != 7) {
        tcg_gen_mov_i32(reg, tmp);
    }
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
    tcg_temp_free(tmp);
}
1750
DISAS_INSN(link)
{
    int16_t offset;

    /* link.w An,#d16 — sign-extended 16-bit frame offset.  */
    offset = read_im16(env, s);
    gen_link(s, insn, offset);
}
1758
DISAS_INSN(linkl)
{
    int32_t offset;

    /* link.l An,#d32 — 32-bit frame offset.  */
    offset = read_im32(env, s);
    gen_link(s, insn, offset);
}
1766
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    /* unlk An: An = *(An), SP = old An + 4.  The copy through src
       keeps the sequence correct even when An is A7.  */
    src = tcg_temp_new();
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0);
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
}
1780
DISAS_INSN(nop)
{
    /* No operation.  */
}
1784
DISAS_INSN(rts)
{
    TCGv tmp;

    /* rts: pop the return address and jump to it.  */
    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
    gen_jmp(s, tmp);
}
1793
/* jmp/jsr <ea> — bit 6 clear selects jsr, which pushes the return
   address first.  */
DISAS_INSN(jump)
{
    TCGv tmp;

    /* Load the target address first to ensure correct exception
       behavior. */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    if ((insn & 0x40) == 0) {
        /* jsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    gen_jmp(s, tmp);
}
1811
/*
 * addq/subq #imm,<ea>: quick 3-bit immediate (0 encodes 8), opcode
 * bit 8 selects subtract.  An address-register destination leaves the
 * condition codes untouched.
 */
DISAS_INSN(addsubq)
{
    TCGv src1;
    TCGv src2;
    TCGv dest;
    int val;
    TCGv addr;

    SRC_EA(env, src1, OS_LONG, 0, &addr);
    val = (insn >> 9) & 7;
    if (val == 0)
        val = 8;
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src1);
    if ((insn & 0x38) == 0x08) {
        /* Don't update condition codes if the destination is an
           address register. */
        if (insn & 0x0100) {
            tcg_gen_subi_i32(dest, dest, val);
        } else {
            tcg_gen_addi_i32(dest, dest, val);
        }
    } else {
        src2 = tcg_const_i32(val);
        if (insn & 0x0100) {
            /* X = borrow, computed before the subtract.  */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src2);
            tcg_gen_sub_i32(dest, dest, src2);
            set_cc_op(s, CC_OP_SUB);
        } else {
            tcg_gen_add_i32(dest, dest, src2);
            /* X = carry: the unsigned sum wrapped below the addend.  */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src2);
            set_cc_op(s, CC_OP_ADD);
        }
        gen_update_cc_add(dest, src2);
    }
    DEST_EA(env, insn, OS_LONG, dest, &addr);
}
1849
DISAS_INSN(tpf)
{
    /* trapf: never traps; just skip over any extension words the
       encoding carries.  */
    switch (insn & 7) {
    case 2: /* One extension word. */
        s->pc += 2;
        break;
    case 3: /* Two extension words. */
        s->pc += 4;
        break;
    case 4: /* No extension words. */
        break;
    default:
        disas_undef(env, s, insn);
    }
}
1865
1866 DISAS_INSN(branch)
1867 {
1868 int32_t offset;
1869 uint32_t base;
1870 int op;
1871 TCGLabel *l1;
1872
1873 base = s->pc;
1874 op = (insn >> 8) & 0xf;
1875 offset = (int8_t)insn;
1876 if (offset == 0) {
1877 offset = (int16_t)read_im16(env, s);
1878 } else if (offset == -1) {
1879 offset = read_im32(env, s);
1880 }
1881 if (op == 1) {
1882 /* bsr */
1883 gen_push(s, tcg_const_i32(s->pc));
1884 }
1885 if (op > 1) {
1886 /* Bcc */
1887 l1 = gen_new_label();
1888 gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
1889 gen_jmp_tb(s, 1, base + offset);
1890 gen_set_label(l1);
1891 gen_jmp_tb(s, 0, s->pc);
1892 } else {
1893 /* Unconditional branch. */
1894 gen_jmp_tb(s, 0, base + offset);
1895 }
1896 }
1897
DISAS_INSN(moveq)
{
    uint32_t val;

    /* moveq #imm8,Dn: sign-extended 8-bit immediate, NZ from result.  */
    val = (int8_t)insn;
    tcg_gen_movi_i32(DREG(insn, 9), val);
    gen_logic_cc(s, tcg_const_i32(val), OS_LONG);
}
1906
/* ColdFire mvz/mvs: move a byte or word to Dn with zero (mvz) or sign
   (mvs) extension; bit 7 of the opcode selects which.  */
DISAS_INSN(mvzs)
{
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x40)
        opsize = OS_WORD;
    else
        opsize = OS_BYTE;
    /* The SRC_EA sign flag is set for mvs (bit 7 clear).  */
    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
    reg = DREG(insn, 9);
    tcg_gen_mov_i32(reg, src);
    gen_logic_cc(s, src, opsize);
}
1922
/* or.l: bit 8 selects Dn|<ea> -> <ea> versus <ea>|Dn -> Dn.  */
DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;

    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, OS_LONG, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        SRC_EA(env, src, OS_LONG, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    }
    gen_logic_cc(s, dest, OS_LONG);
}
1943
DISAS_INSN(suba)
{
    TCGv src;
    TCGv reg;

    /* suba.l <ea>,An — address arithmetic, no flag update.  */
    SRC_EA(env, src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_sub_i32(reg, reg, src);
}
1953
DISAS_INSN(subx)
{
    TCGv reg;
    TCGv src;

    /* subx Dy,Dx: Dx = Dx - Dy - X; the helper reads/writes flags, so
       they must be materialized first.  */
    gen_flush_flags(s);
    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_subx_cc(reg, cpu_env, reg, src);
}
1964
DISAS_INSN(mov3q)
{
    TCGv src;
    int val;

    /* ColdFire mov3q #imm,<ea>: 3-bit immediate where 0 encodes -1.  */
    val = (insn >> 9) & 7;
    if (val == 0)
        val = -1;
    src = tcg_const_i32(val);
    gen_logic_cc(s, src, OS_LONG);
    DEST_EA(env, insn, OS_LONG, src, NULL);
}
1977
DISAS_INSN(cmp)
{
    TCGv src;
    TCGv reg;
    int opsize;

    /* cmp <ea>,Dn: compute Dn - src into the flag state only.
       NOTE(review): the SRC_EA extension argument is -1 here where
       other callers pass 0/1 — presumably any nonzero value requests
       sign extension; confirm against the SRC_EA/gen_ea definition.  */
    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, -1, NULL);
    reg = DREG(insn, 9);
    gen_update_cc_add(reg, src);
    set_cc_op(s, CC_OP_CMP);
}
1990
DISAS_INSN(cmpa)
{
    int opsize;
    TCGv src;
    TCGv reg;

    /* cmpa.w/.l <ea>,An — word form sign-extends the source.  */
    if (insn & 0x100) {
        opsize = OS_LONG;
    } else {
        opsize = OS_WORD;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    gen_update_cc_add(reg, src);
    set_cc_op(s, CC_OP_CMP);
}
2007
DISAS_INSN(eor)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;

    /* eor.l Dn,<ea>: the <ea> operand is always the destination.  */
    SRC_EA(env, src, OS_LONG, 0, &addr);
    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, reg);
    gen_logic_cc(s, dest, OS_LONG);
    DEST_EA(env, insn, OS_LONG, dest, &addr);
}
2022
2023 static void do_exg(TCGv reg1, TCGv reg2)
2024 {
2025 TCGv temp = tcg_temp_new();
2026 tcg_gen_mov_i32(temp, reg1);
2027 tcg_gen_mov_i32(reg1, reg2);
2028 tcg_gen_mov_i32(reg2, temp);
2029 tcg_temp_free(temp);
2030 }
2031
DISAS_INSN(exg_aa)
{
    /* exchange Dx and Dy */
    /* NOTE(review): the function name suggests address registers but
       the body exchanges data registers; the name appears swapped with
       exg_dd below.  Which one each opcode pattern maps to is decided
       by the dispatch table (not visible here) — verify there.  */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}
2037
DISAS_INSN(exg_dd)
{
    /* exchange Ax and Ay */
    /* NOTE(review): name/body mismatch — see exg_aa above; this one
       exchanges address registers despite the "dd" name.  */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}
2043
DISAS_INSN(exg_da)
{
    /* exchange Dx and Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
2049
/* and.l: bit 8 selects Dn&<ea> -> <ea> versus <ea>&Dn -> Dn.  */
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;

    reg = DREG(insn, 9);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, OS_LONG, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, OS_LONG, dest, &addr);
    } else {
        SRC_EA(env, src, OS_LONG, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        tcg_gen_mov_i32(reg, dest);
    }
    gen_logic_cc(s, dest, OS_LONG);
}
2070
DISAS_INSN(adda)
{
    TCGv src;
    TCGv reg;

    /* adda.l <ea>,An — address arithmetic, no flag update.  */
    SRC_EA(env, src, OS_LONG, 0, NULL);
    reg = AREG(insn, 9);
    tcg_gen_add_i32(reg, reg, src);
}
2080
DISAS_INSN(addx)
{
    TCGv reg;
    TCGv src;

    /* addx Dy,Dx: Dx = Dx + Dy + X; helper needs live flags.  */
    gen_flush_flags(s);
    reg = DREG(insn, 9);
    src = DREG(insn, 0);
    gen_helper_addx_cc(reg, cpu_env, reg, src);
}
2091
/* TODO: This could be implemented without helper functions. */
/* asl/asr/lsl/lsr with an immediate count (1-8; the 3-bit field's 0
   encodes 8).  Opcode bit 8 selects a left shift, bit 3 a logical
   rather than arithmetic right shift.  */
DISAS_INSN(shift_im)
{
    TCGv reg;
    int tmp;
    TCGv shift;

    set_cc_op(s, CC_OP_FLAGS);

    reg = DREG(insn, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0)
        tmp = 8;
    shift = tcg_const_i32(tmp);
    /* No need to flush flags because we know we will set C flag. */
    if (insn & 0x100) {
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
    } else {
        if (insn & 8) {
            gen_helper_shr_cc(reg, cpu_env, reg, shift);
        } else {
            gen_helper_sar_cc(reg, cpu_env, reg, shift);
        }
    }
}
2117
/* asl/asr/lsl/lsr with the count in a data register; same helper
   selection scheme as shift_im.  */
DISAS_INSN(shift_reg)
{
    TCGv reg;
    TCGv shift;

    reg = DREG(insn, 0);
    shift = DREG(insn, 9);
    if (insn & 0x100) {
        gen_helper_shl_cc(reg, cpu_env, reg, shift);
    } else {
        if (insn & 8) {
            gen_helper_shr_cc(reg, cpu_env, reg, shift);
        } else {
            gen_helper_sar_cc(reg, cpu_env, reg, shift);
        }
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2136
DISAS_INSN(ff1)
{
    TCGv reg;
    /* ColdFire ff1: flags are set from the *operand*, then the helper
       computes the find-first-one result into Dn.  */
    reg = DREG(insn, 0);
    gen_logic_cc(s, reg, OS_LONG);
    gen_helper_ff1(reg, reg);
}
2144
/* Return a temporary holding the full SR: the system byte from
   QREG_SR combined with the freshly computed CCR bits.  */
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);
    return sr;
}
2156
/* ColdFire strldsr: must be the exact two-word sequence 0x40e7 0x46fc
   (store SR; load immediate into SR); privileged, and the immediate
   must keep the supervisor bit set.  */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    addr = s->pc - 2;
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_UNSUPPORTED);
        return;
    }
    ext = read_im16(env, s);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
}
2176
DISAS_INSN(move_from_sr)
{
    TCGv sr;

    /* move sr,<ea>: privileged except on the original 68000, where
       reading SR is allowed in user mode.  */
    if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    DEST_EA(env, insn, OS_WORD, sr, NULL);
}
2188
DISAS_INSN(move_to_sr)
{
    /* move <ea>,sr: privileged; a mode change may remap translation,
       so force a TB lookup afterwards.  */
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_set_sr(env, s, insn, 0);
    gen_lookup_tb(s);
}
2198
DISAS_INSN(move_from_usp)
{
    /* move usp,An: privileged; reads the saved user stack pointer.  */
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
2208
DISAS_INSN(move_to_usp)
{
    /* move An,usp: privileged; writes the saved user stack pointer.  */
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
2218
DISAS_INSN(halt)
{
    /* halt: stop execution via the halt-instruction exception.  */
    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
2223
DISAS_INSN(stop)
{
    uint16_t ext;

    /* stop #imm: privileged; load SR from the immediate, mark the CPU
       halted, and exit via EXCP_HLT to wait for an interrupt.  */
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
2239
DISAS_INSN(rte)
{
    /* rte: privileged; the actual frame restore is done in the
       EXCP_RTE exception handler rather than in generated code.  */
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->pc - 2, EXCP_RTE);
}
2248
DISAS_INSN(movec)
{
    uint16_t ext;
    TCGv reg;

    /* movec Rn,<ctrl>: privileged.  Extension-word bit 15 selects an
       address register; the low 12 bits name the control register.
       Control registers can affect translation, so refetch the TB.  */
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
    gen_lookup_tb(s);
}
2269
DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch. Implement as no-op. */
}
2278
DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate. Implement as no-op. */
}
2287
DISAS_INSN(wddata)
{
    /* wddata is always treated as privileged here.  */
    gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
}
2292
DISAS_INSN(wdebug)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug. */
    cpu_abort(CPU(cpu), "WDEBUG not implemented");
}
2304
DISAS_INSN(trap)
{
    /* trap #n: vector number from the low 4 opcode bits.  */
    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
}
2309
/* ??? FP exceptions are not implemented. Most exceptions are deferred until
   immediately before the next FP instruction is executed. */
/*
 * Main FPU dispatch: decodes the coprocessor extension word.  Bits
 * 13-15 select the major form (register/EA ops, fmove out, control
 * register moves, fmovem); for arithmetic forms the low 7 bits
 * (opmode) pick the operation and rounding precision.  All internal
 * arithmetic uses double precision ("fake" f64 aliases of i64 ops).
 */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int32_t offset;
    int opmode;
    TCGv_i64 src;
    TCGv_i64 dest;
    TCGv_i64 res;
    TCGv tmp32;
    int round;
    int set_dest;
    int opsize;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0: case 2:
        break;
    case 1:
        goto undef;
    case 3: /* fmove out */
        src = FREG(ext, 7);
        tmp32 = tcg_temp_new_i32();
        /* fmove */
        /* ??? TODO: Proper behavior on overflow. */
        switch ((ext >> 10) & 7) {
        case 0:
            opsize = OS_LONG;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        case 1:
            opsize = OS_SINGLE;
            gen_helper_f64_to_f32(tmp32, cpu_env, src);
            break;
        case 4:
            opsize = OS_WORD;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        case 5: /* OS_DOUBLE */
            /* 64-bit stores are handled inline: compute the address
               (with predecrement/displacement) and use gen_store64.  */
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
            case 2:
            case 3:
                break;
            case 4:
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                break;
            case 5:
                offset = cpu_ldsw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            default:
                goto undef;
            }
            gen_store64(s, tmp32, src);
            /* Write back An for the (An)+ and -(An) modes.  */
            switch ((insn >> 3) & 7) {
            case 3:
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            case 4:
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            }
            tcg_temp_free_i32(tmp32);
            return;
        case 6:
            opsize = OS_BYTE;
            gen_helper_f64_to_i32(tmp32, cpu_env, src);
            break;
        default:
            goto undef;
        }
        DEST_EA(env, insn, opsize, tmp32, NULL);
        tcg_temp_free_i32(tmp32);
        return;
    case 4: /* fmove to control register. */
        switch ((ext >> 10) & 7) {
        case 4: /* FPCR */
            /* Not implemented. Ignore writes. */
            break;
        case 1: /* FPIAR */
        case 2: /* FPSR */
        default:
            /* NOTE(review): cpu_abort with a NULL CPU pointer — other
               abort sites here pass CPU(cpu); verify this is safe.  */
            cpu_abort(NULL, "Unimplemented: fmove to control %d",
                      (ext >> 10) & 7);
        }
        break;
    case 5: /* fmove from control register. */
        switch ((ext >> 10) & 7) {
        case 4: /* FPCR */
            /* Not implemented. Always return zero. */
            tmp32 = tcg_const_i32(0);
            break;
        case 1: /* FPIAR */
        case 2: /* FPSR */
        default:
            cpu_abort(NULL, "Unimplemented: fmove from control %d",
                      (ext >> 10) & 7);
            goto undef;
        }
        DEST_EA(env, insn, OS_LONG, tmp32, NULL);
        break;
    case 6: /* fmovem */
    case 7:
        {
            TCGv addr;
            uint16_t mask;
            int i;
            /* Only the static, predecrement-free register-list form is
               supported, and the list must be non-empty.  */
            if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
                goto undef;
            tmp32 = gen_lea(env, s, insn, OS_LONG);
            if (IS_NULL_QREG(tmp32)) {
                gen_addr_fault(s);
                return;
            }
            addr = tcg_temp_new_i32();
            tcg_gen_mov_i32(addr, tmp32);
            mask = 0x80;
            for (i = 0; i < 8; i++) {
                if (ext & mask) {
                    dest = FREG(i, 0);
                    if (ext & (1 << 13)) {
                        /* store */
                        tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
                    } else {
                        /* load */
                        tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
                    }
                    /* Skip the bump after the last listed register.  */
                    if (ext & (mask - 1))
                        tcg_gen_addi_i32(addr, addr, 8);
                }
                mask >>= 1;
            }
            tcg_temp_free_i32(addr);
        }
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address. */
        switch ((ext >> 10) & 7) {
        case 0: opsize = OS_LONG; break;
        case 1: opsize = OS_SINGLE; break;
        case 4: opsize = OS_WORD; break;
        case 5: opsize = OS_DOUBLE; break;
        case 6: opsize = OS_BYTE; break;
        default:
            goto undef;
        }
        if (opsize == OS_DOUBLE) {
            /* 64-bit loads handled inline, mirroring the store path
               above, with an extra PC-relative-with-displacement mode.  */
            tmp32 = tcg_temp_new_i32();
            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
            switch ((insn >> 3) & 7) {
            case 2:
            case 3:
                break;
            case 4:
                tcg_gen_addi_i32(tmp32, tmp32, -8);
                break;
            case 5:
                offset = cpu_ldsw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            case 7:
                offset = cpu_ldsw_code(env, s->pc);
                offset += s->pc - 2;
                s->pc += 2;
                tcg_gen_addi_i32(tmp32, tmp32, offset);
                break;
            default:
                goto undef;
            }
            src = gen_load64(s, tmp32);
            switch ((insn >> 3) & 7) {
            case 3:
                tcg_gen_addi_i32(tmp32, tmp32, 8);
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            case 4:
                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
                break;
            }
            tcg_temp_free_i32(tmp32);
        } else {
            SRC_EA(env, tmp32, opsize, 1, NULL);
            src = tcg_temp_new_i64();
            switch (opsize) {
            case OS_LONG:
            case OS_WORD:
            case OS_BYTE:
                gen_helper_i32_to_f64(src, cpu_env, tmp32);
                break;
            case OS_SINGLE:
                gen_helper_f32_to_f64(src, cpu_env, tmp32);
                break;
            }
        }
    } else {
        /* Source register. */
        src = FREG(ext, 10);
    }
    dest = FREG(ext, 7);
    res = tcg_temp_new_i64();
    /* Seed res with the destination for the two-operand ops; ftst
       (0x3a) must not read dest.  */
    if (opmode != 0x3a)
        tcg_gen_mov_f64(res, dest);
    round = 1;
    set_dest = 1;
    switch (opmode) {
    case 0: case 0x40: case 0x44: /* fmove */
        tcg_gen_mov_f64(res, src);
        break;
    case 1: /* fint */
        gen_helper_iround_f64(res, cpu_env, src);
        round = 0;
        break;
    case 3: /* fintrz */
        gen_helper_itrunc_f64(res, cpu_env, src);
        round = 0;
        break;
    case 4: case 0x41: case 0x45: /* fsqrt */
        gen_helper_sqrt_f64(res, cpu_env, src);
        break;
    case 0x18: case 0x58: case 0x5c: /* fabs */
        gen_helper_abs_f64(res, src);
        break;
    case 0x1a: case 0x5a: case 0x5e: /* fneg */
        gen_helper_chs_f64(res, src);
        break;
    case 0x20: case 0x60: case 0x64: /* fdiv */
        gen_helper_div_f64(res, cpu_env, res, src);
        break;
    case 0x22: case 0x62: case 0x66: /* fadd */
        gen_helper_add_f64(res, cpu_env, res, src);
        break;
    case 0x23: case 0x63: case 0x67: /* fmul */
        gen_helper_mul_f64(res, cpu_env, res, src);
        break;
    case 0x28: case 0x68: case 0x6c: /* fsub */
        gen_helper_sub_f64(res, cpu_env, res, src);
        break;
    case 0x38: /* fcmp */
        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
        set_dest = 0;
        round = 0;
        break;
    case 0x3a: /* ftst */
        tcg_gen_mov_f64(res, src);
        set_dest = 0;
        round = 0;
        break;
    default:
        goto undef;
    }
    if (ext & (1 << 14)) {
        tcg_temp_free_i64(src);
    }
    /* Round to single precision for the fs<op> forms, or when FPCR
       requests single-precision results (fd<op> forms never round).  */
    if (round) {
        if (opmode & 0x40) {
            if ((opmode & 0x4) != 0)
                round = 0;
        } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
            round = 0;
        }
    }
    if (round) {
        TCGv tmp = tcg_temp_new_i32();
        gen_helper_f64_to_f32(tmp, cpu_env, res);
        gen_helper_f32_to_f64(res, cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_mov_f64(QREG_FP_RESULT, res);
    if (set_dest) {
        tcg_gen_mov_f64(dest, res);
    }
    tcg_temp_free_i64(res);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes? */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
2595
/*
 * FBcc: conditional branch on the FP condition.  The compare helper
 * reduces QREG_FP_RESULT to a small integer (the branch cases below
 * test against -1/0/1/2), and each predicate is expressed as a
 * brcond to the taken-branch label.
 */
DISAS_INSN(fbcc)
{
    uint32_t offset;
    uint32_t addr;
    TCGv flag;
    TCGLabel *l1;

    addr = s->pc;
    offset = cpu_ldsw_code(env, s->pc);
    s->pc += 2;
    if (insn & (1 << 6)) {
        /* 32-bit displacement: combine with the next word.  */
        offset = (offset << 16) | read_im16(env, s);
    }

    l1 = gen_new_label();
    /* TODO: Raise BSUN exception. */
    flag = tcg_temp_new();
    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
    /* Jump to l1 if condition is true. */
    switch (insn & 0xf) {
    case 0: /* f */
        break;
    case 1: /* eq (=0) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
        break;
    case 2: /* ogt (=1) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
        break;
    case 3: /* oge (=0 or =1) */
        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
        break;
    case 4: /* olt (=-1) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
        break;
    case 5: /* ole (=-1 or =0) */
        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
        break;
    case 6: /* ogl (=-1 or =1) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
        break;
    case 7: /* or (=2) */
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
        break;
    case 8: /* un (<2) */
        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
        break;
    case 9: /* ueq (=0 or =2) */
        tcg_gen_andi_i32(flag, flag, 1);
        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
        break;
    case 10: /* ugt (>0) */
        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
        break;
    case 11: /* uge (>=0) */
        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
        break;
    case 12: /* ult (=-1 or =2) */
        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
        break;
    case 13: /* ule (!=1) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
        break;
    case 14: /* ne (!=0) */
        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
        break;
    case 15: /* t */
        tcg_gen_br(l1);
        break;
    }
    gen_jmp_tb(s, 0, s->pc);
    gen_set_label(l1);
    gen_jmp_tb(s, 1, addr + offset);
}
2670
2671 DISAS_INSN(frestore)
2672 {
2673 M68kCPU *cpu = m68k_env_get_cpu(env);
2674
2675 /* TODO: Implement frestore. */
2676 cpu_abort(CPU(cpu), "FRESTORE not implemented");
2677 }
2678
2679 DISAS_INSN(fsave)
2680 {
2681 M68kCPU *cpu = m68k_env_get_cpu(env);
2682
2683 /* TODO: Implement fsave. */
2684 cpu_abort(CPU(cpu), "FSAVE not implemented");
2685 }
2686
2687 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
2688 {
2689 TCGv tmp = tcg_temp_new();
2690 if (s->env->macsr & MACSR_FI) {
2691 if (upper)
2692 tcg_gen_andi_i32(tmp, val, 0xffff0000);
2693 else
2694 tcg_gen_shli_i32(tmp, val, 16);
2695 } else if (s->env->macsr & MACSR_SU) {
2696 if (upper)
2697 tcg_gen_sari_i32(tmp, val, 16);
2698 else
2699 tcg_gen_ext16s_i32(tmp, val);
2700 } else {
2701 if (upper)
2702 tcg_gen_shri_i32(tmp, val, 16);
2703 else
2704 tcg_gen_ext16u_i32(tmp, val);
2705 }
2706 return tmp;
2707 }
2708
/* Clear the MAC result flags (V, Z, N, EV) in MACSR before a MAC
   operation recomputes them.  */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
2714
/* ColdFire EMAC multiply-accumulate (MAC/MSAC), optionally with a
 * parallel memory load.  The opcode word selects the register operands
 * and add-vs-subtract; the following extension word selects the
 * accumulator(s), operand halves, scale factor and dual-accumulate mode.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Lazily allocate the 64-bit product temporary, once per TB.  */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    /* Accumulator index: insn bit 7 is the low bit, ext bit 4 the high
       bit.  */
    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    /* Dual-accumulate form: MAC-with-load plus a second accumulator
       selected in the extension word; requires the EMAC_B feature.  */
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load. */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        /* The effective address is ANDed with the MASK register.  */
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs. */
        loadval = gen_load(s, OS_LONG, addr, 0);

        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word. */
        /* 16x16 form: pick the operand halves selected by ext bits
           7 (rx) and 6 (ry), widened per the current MACSR mode.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Scale the product per ext bits [10:9]: 1 -> <<1, 3 -> >>1.  */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply. */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* insn bit 8: MSAC subtracts the product, MAC adds it.  */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    /* Saturate the accumulator per the current MACSR mode.  */
    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant. */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier. */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated. */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        /* ext bit 1 selects subtract for the second accumulator.  */
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

    if (insn & 0x30) {
        /* MAC with load: write back the loaded value and update the
           address register for post-increment / pre-decrement modes.  */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value? */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment. */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
2878
2879 DISAS_INSN(from_mac)
2880 {
2881 TCGv rx;
2882 TCGv_i64 acc;
2883 int accnum;
2884
2885 rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2886 accnum = (insn >> 9) & 3;
2887 acc = MACREG(accnum);
2888 if (s->env->macsr & MACSR_FI) {
2889 gen_helper_get_macf(rx, cpu_env, acc);
2890 } else if ((s->env->macsr & MACSR_OMC) == 0) {
2891 tcg_gen_extrl_i64_i32(rx, acc);
2892 } else if (s->env->macsr & MACSR_SU) {
2893 gen_helper_get_macs(rx, acc);
2894 } else {
2895 gen_helper_get_macu(rx, acc);
2896 }
2897 if (insn & 0x40) {
2898 tcg_gen_movi_i64(acc, 0);
2899 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2900 }
2901 }
2902
2903 DISAS_INSN(move_mac)
2904 {
2905 /* FIXME: This can be done without a helper. */
2906 int src;
2907 TCGv dest;
2908 src = insn & 3;
2909 dest = tcg_const_i32((insn >> 9) & 3);
2910 gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
2911 gen_mac_clear_flags();
2912 gen_helper_mac_set_flags(cpu_env, dest);
2913 }
2914
2915 DISAS_INSN(from_macsr)
2916 {
2917 TCGv reg;
2918
2919 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2920 tcg_gen_mov_i32(reg, QREG_MACSR);
2921 }
2922
2923 DISAS_INSN(from_mask)
2924 {
2925 TCGv reg;
2926 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2927 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
2928 }
2929
2930 DISAS_INSN(from_mext)
2931 {
2932 TCGv reg;
2933 TCGv acc;
2934 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2935 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2936 if (s->env->macsr & MACSR_FI)
2937 gen_helper_get_mac_extf(reg, cpu_env, acc);
2938 else
2939 gen_helper_get_mac_exti(reg, cpu_env, acc);
2940 }
2941
/* MOV.L MACSR,CCR: copy the low four MACSR flag bits to the condition
   codes.
   NOTE(review): the masked value is written through the set_sr helper,
   which appears to replace the entire status register -- that would
   clear the supervisor/interrupt bits as a side effect of a CCR move.
   Confirm against the set_sr helper; a CCR-only update may be what is
   intended here.  */
DISAS_INSN(macsr_to_ccr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
    gen_helper_set_sr(cpu_env, tmp);
    tcg_temp_free(tmp);
    set_cc_op(s, CC_OP_FLAGS);
}
2950
2951 DISAS_INSN(to_mac)
2952 {
2953 TCGv_i64 acc;
2954 TCGv val;
2955 int accnum;
2956 accnum = (insn >> 9) & 3;
2957 acc = MACREG(accnum);
2958 SRC_EA(env, val, OS_LONG, 0, NULL);
2959 if (s->env->macsr & MACSR_FI) {
2960 tcg_gen_ext_i32_i64(acc, val);
2961 tcg_gen_shli_i64(acc, acc, 8);
2962 } else if (s->env->macsr & MACSR_SU) {
2963 tcg_gen_ext_i32_i64(acc, val);
2964 } else {
2965 tcg_gen_extu_i32_i64(acc, val);
2966 }
2967 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2968 gen_mac_clear_flags();
2969 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
2970 }
2971
2972 DISAS_INSN(to_macsr)
2973 {
2974 TCGv val;
2975 SRC_EA(env, val, OS_LONG, 0, NULL);
2976 gen_helper_set_macsr(cpu_env, val);
2977 gen_lookup_tb(s);
2978 }
2979
2980 DISAS_INSN(to_mask)
2981 {
2982 TCGv val;
2983 SRC_EA(env, val, OS_LONG, 0, NULL);
2984 tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
2985 }
2986
2987 DISAS_INSN(to_mext)
2988 {
2989 TCGv val;
2990 TCGv acc;
2991 SRC_EA(env, val, OS_LONG, 0, NULL);
2992 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2993 if (s->env->macsr & MACSR_FI)
2994 gen_helper_set_mac_extf(cpu_env, val, acc);
2995 else if (s->env->macsr & MACSR_SU)
2996 gen_helper_set_mac_exts(cpu_env, val, acc);
2997 else
2998 gen_helper_set_mac_extu(cpu_env, val, acc);
2999 }
3000
/* Dispatch table mapping every 16-bit opcode to its handler; populated
   once by register_m68k_insns().  */
static disas_proc opcode_table[65536];
3002
3003 static void
3004 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
3005 {
3006 int i;
3007 int from;
3008 int to;
3009
3010 /* Sanity check. All set bits must be included in the mask. */
3011 if (opcode & ~mask) {
3012 fprintf(stderr,
3013 "qemu internal error: bogus opcode definition %04x/%04x\n",
3014 opcode, mask);
3015 abort();
3016 }
3017 /* This could probably be cleverer. For now just optimize the case where
3018 the top bits are known. */
3019 /* Find the first zero bit in the mask. */
3020 i = 0x8000;
3021 while ((i & mask) != 0)
3022 i >>= 1;
3023 /* Iterate over all combinations of this and lower bits. */
3024 if (i == 0)
3025 i = 1;
3026 else
3027 i <<= 1;
3028 from = opcode & ~(i - 1);
3029 to = from + i;
3030 for (i = from; i < to; i++) {
3031 if ((i & mask) == opcode)
3032 opcode_table[i] = proc;
3033 }
3034 }
3035
/* Register m68k opcode handlers.  Order is important.
   Later insn override earlier ones.  */
void register_m68k_insns (CPUM68KState *env)
{
    /* Build the opcode table only once to avoid
       multithreading issues. */
    if (opcode_table[0] != NULL) {
        return;
    }

    /* use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
/* BASE() registers unconditionally; INSN() registers only when ENV has
   the named feature.  Opcode/mask are hex digits pasted onto 0x.  */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    /* Catch-all: undef covers the whole opcode space; every more
       specific registration below overrides part of it.  */
    BASE(undef, 0000, 0000);
    INSN(arith_im, 0080, fff8, CF_ISA_A);
    INSN(arith_im, 0000, ff00, M68000);
    INSN(undef, 00c0, ffc0, M68000);
    INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(arith_im, 0280, fff8, CF_ISA_A);
    INSN(arith_im, 0200, ff00, M68000);
    INSN(undef, 02c0, ffc0, M68000);
    INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0480, fff8, CF_ISA_A);
    INSN(arith_im, 0400, ff00, M68000);
    INSN(undef, 04c0, ffc0, M68000);
    INSN(arith_im, 0600, ff00, M68000);
    INSN(undef, 06c0, ffc0, M68000);
    INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0680, fff8, CF_ISA_A);
    INSN(arith_im, 0c00, ff38, CF_ISA_A);
    INSN(arith_im, 0c00, ff00, M68000);
    BASE(bitop_im, 0800, ffc0);
    BASE(bitop_im, 0840, ffc0);
    BASE(bitop_im, 0880, ffc0);
    BASE(bitop_im, 08c0, ffc0);
    INSN(arith_im, 0a80, fff8, CF_ISA_A);
    INSN(arith_im, 0a00, ff00, M68000);
    BASE(move, 1000, f000);
    BASE(move, 2000, f000);
    BASE(move, 3000, f000);
    INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
    INSN(negx, 4080, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68000);
    BASE(lea, 41c0, f1c0);
    BASE(clr, 4200, ff00);
    BASE(undef, 42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68000);
    INSN(neg, 4480, fff8, CF_ISA_A);
    INSN(neg, 4400, ff00, M68000);
    INSN(undef, 44c0, ffc0, M68000);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not, 4680, fff8, CF_ISA_A);
    INSN(not, 4600, ff00, M68000);
    INSN(undef, 46c0, ffc0, M68000);
    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
    INSN(linkl, 4808, fff8, M68000);
    BASE(pea, 4840, ffc0);
    BASE(swap, 4840, fff8);
    INSN(bkpt, 4848, fff8, BKPT);
    BASE(movem, 48c0, fbc0);
    BASE(ext, 4880, fff8);
    BASE(ext, 48c0, fff8);
    BASE(ext, 49c0, fff8);
    BASE(tst, 4a00, ff00);
    INSN(tas, 4ac0, ffc0, CF_ISA_B);
    INSN(tas, 4ac0, ffc0, M68000);
    INSN(halt, 4ac8, ffff, CF_ISA_A);
    INSN(pulse, 4acc, ffff, CF_ISA_A);
    BASE(illegal, 4afc, ffff);
    INSN(mull, 4c00, ffc0, CF_ISA_A);
    INSN(mull, 4c00, ffc0, LONG_MULDIV);
    INSN(divl, 4c40, ffc0, CF_ISA_A);
    INSN(divl, 4c40, ffc0, LONG_MULDIV);
    INSN(sats, 4c80, fff8, CF_ISA_B);
    BASE(trap, 4e40, fff0);
    BASE(link, 4e50, fff8);
    BASE(unlk, 4e58, fff8);
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    BASE(nop, 4e71, ffff);
    BASE(stop, 4e72, ffff);
    BASE(rte, 4e73, ffff);
    BASE(rts, 4e75, ffff);
    INSN(movec, 4e7b, ffff, CF_ISA_A);
    BASE(jump, 4e80, ffc0);
    INSN(jump, 4ec0, ffc0, CF_ISA_A);
    INSN(addsubq, 5180, f1c0, CF_ISA_A);
    INSN(jump, 4ec0, ffc0, M68000);
    INSN(addsubq, 5000, f080, M68000);
    INSN(addsubq, 5080, f0c0, M68000);
    INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
    INSN(scc, 50c0, f0c0, M68000); /* Scc.B <EA> */
    INSN(addsubq, 5080, f1c0, CF_ISA_A);
    INSN(tpf, 51f8, fff8, CF_ISA_A);

    /* Branch instructions. */
    BASE(branch, 6000, f000);
    /* Disable long branch instructions, then add back the ones we want. */
    BASE(undef, 60ff, f0ff); /* All long branches. */
    INSN(branch, 60ff, f0ff, CF_ISA_B);
    INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch, 60ff, ffff, BRAL);
    INSN(branch, 60ff, f0ff, BCCL);

    BASE(moveq, 7000, f100);
    INSN(mvzs, 7100, f100, CF_ISA_B);
    BASE(or, 8000, f000);
    BASE(divw, 80c0, f0c0);
    BASE(addsub, 9000, f000);
    INSN(subx, 9180, f1f8, CF_ISA_A);
    INSN(suba, 91c0, f1c0, CF_ISA_A);

    /* ColdFire EMAC instructions (lines a000-afff).  */
    BASE(undef_mac, a000, f000);
    INSN(mac, a000, f100, CF_EMAC);
    INSN(from_mac, a180, f9b0, CF_EMAC);
    INSN(move_mac, a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac, a100, f9c0, CF_EMAC);
    INSN(to_macsr, a900, ffc0, CF_EMAC);
    INSN(to_mext, ab00, fbc0, CF_EMAC);
    INSN(to_mask, ad00, ffc0, CF_EMAC);

    INSN(mov3q, a140, f1c0, CF_ISA_B);
    INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp, b080, f1c0, CF_ISA_A);
    INSN(cmpa, b1c0, f1c0, CF_ISA_A);
    INSN(cmp, b000, f100, M68000);
    INSN(eor, b100, f100, M68000);
    INSN(cmpa, b0c0, f0c0, M68000);
    INSN(eor, b180, f1c0, CF_ISA_A);
    BASE(and, c000, f000);
    INSN(exg_dd, c140, f1f8, M68000);
    INSN(exg_aa, c148, f1f8, M68000);
    INSN(exg_da, c188, f1f8, M68000);
    BASE(mulw, c0c0, f0c0);
    BASE(addsub, d000, f000);
    INSN(addx, d180, f1f8, CF_ISA_A);
    INSN(adda, d1c0, f1c0, CF_ISA_A);
    INSN(adda, d0c0, f0c0, M68000);
    INSN(shift_im, e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(undef_fpu, f000, f000, CF_ISA_A);
    INSN(fpu, f200, ffc0, CF_FPU);
    INSN(fbcc, f280, ffc0, CF_FPU);
    INSN(frestore, f340, ffc0, CF_FPU);
    INSN(fsave, f340, ffc0, CF_FPU);
    INSN(intouch, f340, ffc0, CF_ISA_A);
    INSN(cpushl, f428, ff38, CF_ISA_A);
    INSN(wddata, fb00, ff00, CF_ISA_A);
    INSN(wdebug, fbc0, ffc0, CF_ISA_A);
#undef INSN
}
3205
3206 /* ??? Some of this implementation is not exception safe. We should always
3207 write back the result to memory before setting the condition codes. */
3208 static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
3209 {
3210 uint16_t insn;
3211
3212 insn = read_im16(env, s);
3213
3214 opcode_table[insn](env, s, insn);
3215 }
3216
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    int pc_offset;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    /* Initialise the per-TB disassembly state.  cc_op starts DYNAMIC:
       the env value is authoritative until an insn sets it.  */
    dc->env = env;
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_synced = 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->fpcr = env->fpcr;
    dc->user = (env->sr & SR_S) == 0;
    dc->done_mac = 0;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    /* Main translation loop: one iteration per guest instruction.  */
    do {
        pc_offset = dc->pc - pc_start;
        gen_throws_exception = NULL;
        tcg_gen_insn_start(dc->pc, dc->cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_exception(dc, dc->pc, EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc->pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (!dc->is_jmp) {
            update_cc_op(dc);
            tcg_gen_movi_i32(QREG_PC, dc->pc);
        }
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
    } else {
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            /* Fell off the end of the block: chain to the next PC.  */
            update_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            update_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
3322
/* Dump the CPU registers (D0-D7, A0-A7, F0-F7, PC, SR, FPRESULT) to F
   for the monitor / -d cpu logging.  */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    CPU_DoubleU u;
    for (i = 0; i < 8; i++)
    {
        /* The FP value is printed both as raw 64-bit hex and, via a
           pointer cast, as a host double -- assumes the stored float64
           bits match the host double layout; TODO confirm.  */
        u.d = env->fregs[i];
        cpu_fprintf(f, "D%d = %08x A%d = %08x F%d = %08x%08x (%12g)\n",
                    i, env->dregs[i], i, env->aregs[i],
                    i, u.l.upper, u.l.lower, *(double *)&u.d);
    }
    cpu_fprintf (f, "PC = %08x ", env->pc);
    /* The flag bits live in cc_* fields; merge them into SR for display. */
    sr = env->sr | cpu_m68k_get_ccr(env);
    cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & CCF_X) ? 'X' : '-',
                (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
                (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
    cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
}
3345
3346 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
3347 target_ulong *data)
3348 {
3349 int cc_op = data[1];
3350 env->pc = data[0];
3351 if (cc_op != CC_OP_DYNAMIC) {
3352 env->cc_op = cc_op;
3353 }
3354 }