/*
 * Source: target/m68k/translate.c
 * (mirror of qemu.git, around "tcg: define tcg_init_ctx and make
 *  tcg_ctx a pointer")
 */
1 /*
2 * m68k translation
3 *
4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
29
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35
//#define DEBUG_DISPATCH 1

/* Declare one static TCG global per entry in qregs.def (QREG_PC,
   QREG_CC_N, ...); the definitions are re-expanded in m68k_tcg_init(). */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.def"
#undef DEFO32
#undef DEFO64

/* TCG views of fields living in CPUState (outside CPUM68KState). */
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

static TCGv_env cpu_env;

/* Backing store for register names: 16 x "Dn"/"An" (3 bytes each with
   NUL) plus 4 x "ACCn" (5 bytes each with NUL). */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_macc[4];    /* MAC accumulators */

#define REG(insn, pos) (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP get_areg(s, 7)

/* Sentinel TCGv returned by EA helpers for invalid addressing modes;
   points at a negative env offset so it can never alias a real field. */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes. */
static TCGv store_dummy;

#include "exec/gen-icount.h"
66
/* Create all TCG globals used by the m68k translator.  Called once at
   CPU-class init time. */
void m68k_tcg_init(void)
{
    char *p;
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx->tcg_env = cpu_env;

/* Re-expand qregs.def, this time creating the TCG globals declared above. */
#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.def"
#undef DEFO32
#undef DEFO64

    /* halted/exception_index live in CPUState, which precedes env inside
       M68kCPU; hence the negative offset adjustment. */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /* Build the Dn/An/ACCn globals; their names are stored back-to-back
       in cpu_reg_names (see the size computation at its definition). */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /* Sentinels at impossible (negative) env offsets. */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
114
115 /* internal defines */
116 typedef struct DisasContext {
117 CPUM68KState *env;
118 target_ulong insn_pc; /* Start of the current instruction. */
119 target_ulong pc;
120 int is_jmp;
121 CCOp cc_op; /* Current CC operation */
122 int cc_op_synced;
123 int user;
124 struct TranslationBlock *tb;
125 int singlestep_enabled;
126 TCGv_i64 mactmp;
127 int done_mac;
128 int writeback_mask;
129 TCGv writeback[8];
130 } DisasContext;
131
132 static TCGv get_areg(DisasContext *s, unsigned regno)
133 {
134 if (s->writeback_mask & (1 << regno)) {
135 return s->writeback[regno];
136 } else {
137 return cpu_aregs[regno];
138 }
139 }
140
141 static void delay_set_areg(DisasContext *s, unsigned regno,
142 TCGv val, bool give_temp)
143 {
144 if (s->writeback_mask & (1 << regno)) {
145 if (give_temp) {
146 tcg_temp_free(s->writeback[regno]);
147 s->writeback[regno] = val;
148 } else {
149 tcg_gen_mov_i32(s->writeback[regno], val);
150 }
151 } else {
152 s->writeback_mask |= 1 << regno;
153 if (give_temp) {
154 s->writeback[regno] = val;
155 } else {
156 TCGv tmp = tcg_temp_new();
157 s->writeback[regno] = tmp;
158 tcg_gen_mov_i32(tmp, val);
159 }
160 }
161 }
162
163 static void do_writebacks(DisasContext *s)
164 {
165 unsigned mask = s->writeback_mask;
166 if (mask) {
167 s->writeback_mask = 0;
168 do {
169 unsigned regno = ctz32(mask);
170 tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
171 tcg_temp_free(s->writeback[regno]);
172 mask &= mask - 1;
173 } while (mask);
174 }
175 }
176
/* is_jmp field values */
#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
#define DISAS_JUMP_NEXT DISAS_TARGET_3

/* User-only builds translate user code exclusively, so the check folds
   to a constant. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) s->user
#endif

/* XXX: move that elsewhere */
/* ??? Fix exceptions. */
static void *gen_throws_exception;
#define gen_last_qop NULL

/* Signature of per-instruction translation routines (see DISAS_INSN). */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

/* With DEBUG_DISPATCH, wrap each handler in a logging stub that prints
   the handler name before delegating to the real implementation. */
#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name) \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
                                  uint16_t insn); \
    static void disas_##name(CPUM68KState *env, DisasContext *s, \
                             uint16_t insn) \
    { \
        qemu_log("Dispatch " #name "\n"); \
        real_disas_##name(env, s, insn); \
    } \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
                                  uint16_t insn)
#else
#define DISAS_INSN(name) \
    static void disas_##name(CPUM68KState *env, DisasContext *s, \
                             uint16_t insn)
#endif
213
/* For each lazy CC_OP mode, the set of CCF_* flags whose values are kept
   valid by that mode; anything missing must be recomputed (see
   gen_flush_flags).  X and N are live in every mode. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
221
222 static void set_cc_op(DisasContext *s, CCOp op)
223 {
224 CCOp old_op = s->cc_op;
225 int dead;
226
227 if (old_op == op) {
228 return;
229 }
230 s->cc_op = op;
231 s->cc_op_synced = 0;
232
233 /* Discard CC computation that will no longer be used.
234 Note that X and N are never dead. */
235 dead = cc_op_live[old_op] & ~cc_op_live[op];
236 if (dead & CCF_C) {
237 tcg_gen_discard_i32(QREG_CC_C);
238 }
239 if (dead & CCF_Z) {
240 tcg_gen_discard_i32(QREG_CC_Z);
241 }
242 if (dead & CCF_V) {
243 tcg_gen_discard_i32(QREG_CC_V);
244 }
245 }
246
247 /* Update the CPU env CC_OP state. */
248 static void update_cc_op(DisasContext *s)
249 {
250 if (!s->cc_op_synced) {
251 s->cc_op_synced = 1;
252 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
253 }
254 }
255
/* Generate a jump to an immediate address.  Syncs cc_op first so the
   exit state is consistent, then marks the TB as ending in a dynamic
   jump. */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}
263
/* Generate a jump to the address in qreg DEST. */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}
271
272 static void gen_raise_exception(int nr)
273 {
274 TCGv_i32 tmp = tcg_const_i32(nr);
275
276 gen_helper_raise_exception(cpu_env, tmp);
277 tcg_temp_free_i32(tmp);
278 }
279
280 static void gen_exception(DisasContext *s, uint32_t where, int nr)
281 {
282 update_cc_op(s);
283 gen_jmp_im(s, where);
284 gen_raise_exception(nr);
285 }
286
/* Raise an address-error exception at the start of the current insn. */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
}
291
292 /* Generate a load from the specified address. Narrow values are
293 sign extended to full register width. */
294 static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
295 {
296 TCGv tmp;
297 int index = IS_USER(s);
298 tmp = tcg_temp_new_i32();
299 switch(opsize) {
300 case OS_BYTE:
301 if (sign)
302 tcg_gen_qemu_ld8s(tmp, addr, index);
303 else
304 tcg_gen_qemu_ld8u(tmp, addr, index);
305 break;
306 case OS_WORD:
307 if (sign)
308 tcg_gen_qemu_ld16s(tmp, addr, index);
309 else
310 tcg_gen_qemu_ld16u(tmp, addr, index);
311 break;
312 case OS_LONG:
313 tcg_gen_qemu_ld32u(tmp, addr, index);
314 break;
315 default:
316 g_assert_not_reached();
317 }
318 gen_throws_exception = gen_last_qop;
319 return tmp;
320 }
321
322 /* Generate a store. */
323 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
324 {
325 int index = IS_USER(s);
326 switch(opsize) {
327 case OS_BYTE:
328 tcg_gen_qemu_st8(val, addr, index);
329 break;
330 case OS_WORD:
331 tcg_gen_qemu_st16(val, addr, index);
332 break;
333 case OS_LONG:
334 tcg_gen_qemu_st32(val, addr, index);
335 break;
336 default:
337 g_assert_not_reached();
338 }
339 gen_throws_exception = gen_last_qop;
340 }
341
/* Direction/extension selector for the unified EA access helpers. */
typedef enum {
    EA_STORE,   /* write VAL to the EA */
    EA_LOADU,   /* read from the EA, zero-extended */
    EA_LOADS    /* read from the EA, sign-extended */
} ea_what;
347
348 /* Generate an unsigned load if VAL is 0 a signed load if val is -1,
349 otherwise generate a store. */
350 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
351 ea_what what)
352 {
353 if (what == EA_STORE) {
354 gen_store(s, opsize, addr, val);
355 return store_dummy;
356 } else {
357 return gen_load(s, opsize, addr, what == EA_LOADS);
358 }
359 }
360
361 /* Read a 16-bit immediate constant */
362 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
363 {
364 uint16_t im;
365 im = cpu_lduw_code(env, s->pc);
366 s->pc += 2;
367 return im;
368 }
369
/* Read an 8-bit immediate constant.  m68k immediates occupy a full code
   word; the low byte holds the value, so the implicit truncation of the
   16-bit fetch is intentional. */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
{
    return read_im16(env, s);
}
375
376 /* Read a 32-bit immediate constant. */
377 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
378 {
379 uint32_t im;
380 im = read_im16(env, s) << 16;
381 im |= 0xffff & read_im16(env, s);
382 return im;
383 }
384
385 /* Read a 64-bit immediate constant. */
386 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
387 {
388 uint64_t im;
389 im = (uint64_t)read_im32(env, s) << 32;
390 im |= (uint64_t)read_im32(env, s);
391 return im;
392 }
393
/* Calculate an address index from extension word EXT: the index register
   (An or Dn selected by bit 15), optionally sign-extended from word
   width (bit 11 clear) and scaled by 1/2/4/8 (bits 9-10).  TMP is a
   caller-provided scratch; the result may alias it or the register. */
static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    /* Bit 15 selects address vs data register file; bits 12-14 the regno. */
    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        /* Word-sized index: use only the sign-extended low 16 bits. */
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
412
/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  Reads the extension word(s) from
   the code stream and returns a TCGv holding the effective address, or
   NULL_QREG if the encoding is invalid for the current CPU features. */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* Remember pc of the extension word: pc-relative modes are relative
       to it. */
    offset = s->pc;
    ext = read_im16(env, s);

    /* Word-sized index (bit 11 clear) needs the WORD_INDEX feature. */
    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* CPUs without scaled indexing ignore the scale field. */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement: word (0x20) or long (0x30) */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index: index register applied before indirection */
            add = gen_addr_index(s, ext, tmp);
        } else {
            /* index suppressed, or post-indexed (applied later) */
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold base displacement into the constant */
                base = tcg_const_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            /* everything suppressed: address is just the displacement */
            add = tcg_const_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect: fetch intermediate pointer from memory */
            base = gen_load(s, OS_LONG, add, 0);
            if ((ext & 0x44) == 4) {
                /* post-index: index register added after the fetch */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement: word (2) or long (3) */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format: index + 8-bit displacement */
        tmp = tcg_temp_new();
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            /* pc-relative */
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
517
518 /* Sign or zero extend a value. */
519
520 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
521 {
522 switch (opsize) {
523 case OS_BYTE:
524 if (sign) {
525 tcg_gen_ext8s_i32(res, val);
526 } else {
527 tcg_gen_ext8u_i32(res, val);
528 }
529 break;
530 case OS_WORD:
531 if (sign) {
532 tcg_gen_ext16s_i32(res, val);
533 } else {
534 tcg_gen_ext16u_i32(res, val);
535 }
536 break;
537 case OS_LONG:
538 tcg_gen_mov_i32(res, val);
539 break;
540 default:
541 g_assert_not_reached();
542 }
543 }
544
/* Evaluate all the CC flags. */

/* Convert the lazily-maintained CC state into concrete C/V/Z/N/X values
   (mode CC_OP_FLAGS).  On entry the meaning of QREG_CC_N/QREG_CC_V
   depends on s->cc_op (e.g. result and operand for ADD/SUB/CMP). */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already in canonical form. */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* t0 = second operand, recovered as result - first operand. */
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        tcg_temp_free(t1);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* t0 = minuend, recovered as result + subtrahend. */
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        tcg_temp_free(t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* For CMP: N holds the first operand, V holds the second. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* cc_op is only known at runtime: defer to the helper. */
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        t0 = tcg_const_i32(s->cc_op);
        gen_helper_flush_flags(cpu_env, t0);
        tcg_temp_free(t0);
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op. */
    s->cc_op = CC_OP_FLAGS;
}
626
627 static inline TCGv gen_extend(TCGv val, int opsize, int sign)
628 {
629 TCGv tmp;
630
631 if (opsize == OS_LONG) {
632 tmp = val;
633 } else {
634 tmp = tcg_temp_new();
635 gen_ext(tmp, val, opsize, sign);
636 }
637
638 return tmp;
639 }
640
/* Set the flags for a logical-operation result: N holds the
   sign-extended result; C/V/Z are implied by CC_OP_LOGIC. */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
646
/* Record a comparison in the lazy CC state: N takes the first operand,
   V the second, with the size encoded in the CC_OP_CMP* mode. */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}
653
/* Record an addition/subtraction in the lazy CC state: N takes the
   sign-extended result, V the second operand.  Caller sets cc_op. */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
659
660 static inline int opsize_bytes(int opsize)
661 {
662 switch (opsize) {
663 case OS_BYTE: return 1;
664 case OS_WORD: return 2;
665 case OS_LONG: return 4;
666 case OS_SINGLE: return 4;
667 case OS_DOUBLE: return 8;
668 case OS_EXTENDED: return 12;
669 case OS_PACKED: return 12;
670 default:
671 g_assert_not_reached();
672 }
673 }
674
675 static inline int insn_opsize(int insn)
676 {
677 switch ((insn >> 6) & 3) {
678 case 0: return OS_BYTE;
679 case 1: return OS_WORD;
680 case 2: return OS_LONG;
681 default:
682 g_assert_not_reached();
683 }
684 }
685
686 static inline int ext_opsize(int ext, int pos)
687 {
688 switch ((ext >> pos) & 7) {
689 case 0: return OS_LONG;
690 case 1: return OS_SINGLE;
691 case 2: return OS_EXTENDED;
692 case 3: return OS_PACKED;
693 case 4: return OS_WORD;
694 case 5: return OS_DOUBLE;
695 case 6: return OS_BYTE;
696 default:
697 g_assert_not_reached();
698 }
699 }
700
/* Assign value to a register.  If the width is less than the register width
   only the low part of the register is set. */
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
{
    TCGv tmp;
    switch (opsize) {
    case OS_BYTE:
        /* Keep the upper 24 bits of REG, merge in the low byte of VAL. */
        tcg_gen_andi_i32(reg, reg, 0xffffff00);
        tmp = tcg_temp_new();
        tcg_gen_ext8u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_temp_free(tmp);
        break;
    case OS_WORD:
        /* Keep the upper 16 bits of REG, merge in the low word of VAL. */
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
        tmp = tcg_temp_new();
        tcg_gen_ext16u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_temp_free(tmp);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_mov_i32(reg, val);
        break;
    default:
        g_assert_not_reached();
    }
}
729
/* Generate code for an "effective address". Does not adjust the base
   register for autoincrement addressing modes.  Returns a TCGv holding
   the address, or NULL_QREG for modes that have no address (register
   direct, immediate) or are otherwise invalid. */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
    case 1: /* Address register direct. */
        /* Register-direct operands have no memory address. */
        return NULL_QREG;
    case 3: /* Indirect postincrement. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrememnt. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* Byte pushes through SP decrement by 2 to keep it aligned. */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement. */
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement. */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
            offset = (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 1: /* Absolute long. */
            offset = read_im32(env, s);
            return tcg_const_i32(offset);
        case 2: /* pc displacement */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return tcg_const_i32(offset);
        case 3: /* pc index+displacement. */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate. */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
795
796 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
797 int opsize)
798 {
799 int mode = extract32(insn, 3, 3);
800 int reg0 = REG(insn, 0);
801 return gen_lea_mode(env, s, mode, reg0, opsize);
802 }
803
/* Generate code to load/store a value from/into an EA.  WHAT selects a
   store (EA_STORE) or a load with sign (EA_LOADS) or zero (EA_LOADU)
   extension.  ADDRP is non-null for read-modify-write operands: the
   address computed during the read phase is returned through it and
   reused for the store phase.  Returns the loaded value, store_dummy on
   a successful store, or NULL_QREG for an invalid addressing mode. */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct. */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what);
    case 3: /* Indirect postincrement. */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what);
        if (what == EA_STORE || !addrp) {
            /* Defer the increment so later operands of the same insn
               still see the old An (writeback happens at insn end). */
            TCGv tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68000)) {
                /* Byte accesses through SP adjust by 2 to stay aligned. */
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrememnt. */
        if (addrp && what == EA_STORE) {
            /* RMW store phase: reuse the address from the read phase. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what);
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            /* Sign extend values for consistency. */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_const_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
917
918 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
919 int opsize, TCGv val, TCGv *addrp, ea_what what)
920 {
921 int mode = extract32(insn, 3, 3);
922 int reg0 = REG(insn, 0);
923 return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what);
924 }
925
926 static TCGv_ptr gen_fp_ptr(int freg)
927 {
928 TCGv_ptr fp = tcg_temp_new_ptr();
929 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
930 return fp;
931 }
932
933 static TCGv_ptr gen_fp_result_ptr(void)
934 {
935 TCGv_ptr fp = tcg_temp_new_ptr();
936 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
937 return fp;
938 }
939
/* Copy one FPReg to another through its in-memory layout: the 16-bit
   exponent/sign half (l.upper) and the 64-bit mantissa (l.lower). */
static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
{
    TCGv t32;
    TCGv_i64 t64;

    t32 = tcg_temp_new();
    tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
    tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
    tcg_temp_free(t32);

    t64 = tcg_temp_new_i64();
    tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
    tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
    tcg_temp_free_i64(t64);
}
955
956 static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
957 {
958 TCGv tmp;
959 TCGv_i64 t64;
960 int index = IS_USER(s);
961
962 t64 = tcg_temp_new_i64();
963 tmp = tcg_temp_new();
964 switch (opsize) {
965 case OS_BYTE:
966 tcg_gen_qemu_ld8s(tmp, addr, index);
967 gen_helper_exts32(cpu_env, fp, tmp);
968 break;
969 case OS_WORD:
970 tcg_gen_qemu_ld16s(tmp, addr, index);
971 gen_helper_exts32(cpu_env, fp, tmp);
972 break;
973 case OS_LONG:
974 tcg_gen_qemu_ld32u(tmp, addr, index);
975 gen_helper_exts32(cpu_env, fp, tmp);
976 break;
977 case OS_SINGLE:
978 tcg_gen_qemu_ld32u(tmp, addr, index);
979 gen_helper_extf32(cpu_env, fp, tmp);
980 break;
981 case OS_DOUBLE:
982 tcg_gen_qemu_ld64(t64, addr, index);
983 gen_helper_extf64(cpu_env, fp, t64);
984 tcg_temp_free_i64(t64);
985 break;
986 case OS_EXTENDED:
987 if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
988 gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
989 break;
990 }
991 tcg_gen_qemu_ld32u(tmp, addr, index);
992 tcg_gen_shri_i32(tmp, tmp, 16);
993 tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
994 tcg_gen_addi_i32(tmp, addr, 4);
995 tcg_gen_qemu_ld64(t64, tmp, index);
996 tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
997 break;
998 case OS_PACKED:
999 /* unimplemented data type on 68040/ColdFire
1000 * FIXME if needed for another FPU
1001 */
1002 gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1003 break;
1004 default:
1005 g_assert_not_reached();
1006 }
1007 tcg_temp_free(tmp);
1008 tcg_temp_free_i64(t64);
1009 gen_throws_exception = gen_last_qop;
1010 }
1011
/* Convert the extended-precision register pointed to by FP into an
   OPSIZE-formatted value (via the reds32/redf32/redf64 helpers) and
   store it at ADDR.  OS_EXTENDED is stored piecewise; OS_PACKED and, on
   ColdFire, OS_EXTENDED raise EXCP_FP_UNIMP. */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
{
    TCGv tmp;
    TCGv_i64 t64;
    int index = IS_USER(s);

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st8(tmp, addr, index);
        break;
    case OS_WORD:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st16(tmp, addr, index);
        break;
    case OS_LONG:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st64(t64, addr, index);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
            break;
        }
        /* Exponent/sign go in the top 16 bits of the first longword. */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st32(tmp, addr, index);
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st64(t64, tmp, index);
        break;
    case OS_PACKED:
        /* unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
    gen_throws_exception = gen_last_qop;
}
1066
1067 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1068 TCGv_ptr fp, ea_what what)
1069 {
1070 if (what == EA_STORE) {
1071 gen_store_fp(s, opsize, addr, fp);
1072 } else {
1073 gen_load_fp(s, opsize, addr, fp);
1074 }
1075 }
1076
/* Load/store the FP register pointed to by FP from/to the EA described
   by MODE/REG0 with format OPSIZE.  Returns 0 on success, -1 for an
   invalid addressing mode (the caller raises the fault). */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            /* Dn can only hold the 32-bit-or-narrower formats. */
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free(tmp);
        }
        return 0;
    case 1: /* Address register direct. */
        /* An is never a valid FP operand. */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what);
        return 0;
    case 3: /* Indirect postincrement. */
        /* Note: updates the An global directly (no delayed writeback). */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what);
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrememnt. */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what);
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_const_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_WORD:
                tmp = tcg_const_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_LONG:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_const_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                tcg_temp_free_i64(t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
                    break;
                }
                /* 16-bit exponent/sign half, then 64-bit mantissa. */
                tmp = tcg_const_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                tcg_temp_free(tmp);
                t64 = tcg_const_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                tcg_temp_free_i64(t64);
                break;
            case OS_PACKED:
                /* unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1215
1216 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1217 int opsize, TCGv_ptr fp, ea_what what)
1218 {
1219 int mode = extract32(insn, 3, 3);
1220 int reg0 = REG(insn, 0);
1221 return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what);
1222 }
1223
/* A materialized condition: the m68k condition holds exactly when
 * "v1 <tcond> v2" is true.  Filled in by gen_cc_cond and released
 * with free_cond.  */
typedef struct {
    TCGCond tcond;  /* TCG comparison to apply to v1/v2 */
    bool g1;        /* v1 is a global; free_cond must not free it */
    bool g2;        /* v2 is a global; free_cond must not free it */
    TCGv v1;        /* left-hand operand */
    TCGv v2;        /* right-hand operand */
} DisasCompare;
1231
/* Fill *C with a comparison that is true exactly when the m68k
 * condition code COND (standard 0..15 encoding) holds, given the
 * current lazy flag state S->cc_op.  Each odd condition number is the
 * inverse of the preceding even one; the body computes the tcond for
 * one of the pair and the "done" epilogue inverts it for the even
 * (negated) member.  May call gen_flush_flags, so cc_op can change.  */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly. */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        /* In the CMP form, CC_N holds the destination and CC_V the
           source operand, so v1 <op> v2 compares them directly.  */
        c->g1 = c->g2 = 1;
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* PL/MI need the sign of the subtraction result itself:
               materialize N - V and extend it to the operation size,
               then share the "< 0" comparison with GE/LT below.  */
            c->g1 = c->g2 = 0;
            c->v2 = tcg_const_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    /* Default operand shape for the remaining cases: compare some
       value (v1, chosen per case) against constant zero.  */
    c->g1 = 1;
    c->g2 = 0;
    c->v2 = tcg_const_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* Logic operations clear V, which simplifies LE to (Z || N),
           and since Z and N are co-located, this becomes a normal
           comparison vs N. */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N. */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        /* The Z flag is encoded as "CC_Z == 0", hence the setcond.  */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* V and N live in the sign bit after a flush.  */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* tmp = -(Z flag) gives all-ones when Z is set, which
           dominates the sign-bit test after OR-ing in N ^ V.  */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcg_temp_free(tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition numbers are the negations of the odd ones
       computed above.  */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1410
1411 static void free_cond(DisasCompare *c)
1412 {
1413 if (!c->g1) {
1414 tcg_temp_free(c->v1);
1415 }
1416 if (!c->g2) {
1417 tcg_temp_free(c->v2);
1418 }
1419 }
1420
1421 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1422 {
1423 DisasCompare c;
1424
1425 gen_cc_cond(&c, s, cond);
1426 update_cc_op(s);
1427 tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1428 free_cond(&c);
1429 }
1430
/* Force a TB lookup after an instruction that changes the CPU state. */
static void gen_lookup_tb(DisasContext *s)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    /* NOTE(review): DISAS_UPDATE appears to end the TB without direct
       chaining so the new state is honored on the next lookup —
       confirm against the translator main loop.  */
    s->is_jmp = DISAS_UPDATE;
}
1438
/* Load the source operand described by INSN's EA field into RESULT,
 * sign-extended when OP_SIGN is set.  ADDRP, when non-NULL, receives
 * the computed address so a following DEST_EA can write back to the
 * same location.  On an invalid EA this raises an address fault and
 * returns from the *enclosing* function.  */
#define SRC_EA(env, result, opsize, op_sign, addrp) do { \
    result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp, \
                    op_sign ? EA_LOADS : EA_LOADU); \
    if (IS_NULL_QREG(result)) { \
        gen_addr_fault(s); \
        return; \
    } \
} while (0)
1447
/* Store VAL to the destination described by INSN's EA field.  ADDRP,
 * when non-NULL, supplies an address previously computed by SRC_EA so
 * the EA is not evaluated twice.  On an invalid EA this raises an
 * address fault and returns from the *enclosing* function.  */
#define DEST_EA(env, insn, opsize, val, addrp) do { \
    TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
    if (IS_NULL_QREG(ea_result)) { \
        gen_addr_fault(s); \
        return; \
    } \
} while (0)
1455
1456 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1457 {
1458 #ifndef CONFIG_USER_ONLY
1459 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
1460 (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1461 #else
1462 return true;
1463 #endif
1464 }
1465
/* Generate a jump to an immediate address.  N is the goto_tb slot
 * (0 or 1) used to chain to the target when use_goto_tb allows it;
 * otherwise fall back to an indirect exit via gen_jmp_im.  Under
 * single-stepping, raise EXCP_DEBUG at DEST instead so the debugger
 * regains control.  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        gen_exception(s, dest, EXCP_DEBUG);
    } else if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        /* Exit value encodes the TB pointer plus the slot index for
           TB chaining; goto_tb/exit_tb must use the same N.  */
        tcg_gen_exit_tb((uintptr_t)s->tb + n);
    } else {
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}
1481
1482 DISAS_INSN(scc)
1483 {
1484 DisasCompare c;
1485 int cond;
1486 TCGv tmp;
1487
1488 cond = (insn >> 8) & 0xf;
1489 gen_cc_cond(&c, s, cond);
1490
1491 tmp = tcg_temp_new();
1492 tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1493 free_cond(&c);
1494
1495 tcg_gen_neg_i32(tmp, tmp);
1496 DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1497 tcg_temp_free(tmp);
1498 }
1499
/* DBcc: if the condition holds, fall through; otherwise decrement
 * Dn.w and branch back unless the counter wrapped to -1.  */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;  /* displacement is relative to the extension word */
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    /* Decrement the low word of Dn, leaving the upper word intact.  */
    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    gen_partset_reg(OS_WORD, reg, tmp);
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc);
}
1523
/* Undefined MAC (line-A) instruction: raise the line-A exception.  */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->pc - 2, EXCP_LINEA);
}
1528
/* Undefined FPU (line-F) instruction: raise the line-F exception.  */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->pc - 2, EXCP_LINEF);
}
1533
1534 DISAS_INSN(undef)
1535 {
1536 /* ??? This is both instructions that are as yet unimplemented
1537 for the 680x0 series, as well as those that are implemented
1538 but actually illegal for CPU32 or pre-68020. */
1539 qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x",
1540 insn, s->pc - 2);
1541 gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
1542 }
1543
1544 DISAS_INSN(mulw)
1545 {
1546 TCGv reg;
1547 TCGv tmp;
1548 TCGv src;
1549 int sign;
1550
1551 sign = (insn & 0x100) != 0;
1552 reg = DREG(insn, 9);
1553 tmp = tcg_temp_new();
1554 if (sign)
1555 tcg_gen_ext16s_i32(tmp, reg);
1556 else
1557 tcg_gen_ext16u_i32(tmp, reg);
1558 SRC_EA(env, src, OS_WORD, sign, NULL);
1559 tcg_gen_mul_i32(tmp, tmp, src);
1560 tcg_gen_mov_i32(reg, tmp);
1561 gen_logic_cc(s, tmp, OS_LONG);
1562 tcg_temp_free(tmp);
1563 }
1564
1565 DISAS_INSN(divw)
1566 {
1567 int sign;
1568 TCGv src;
1569 TCGv destr;
1570
1571 /* divX.w <EA>,Dn 32/16 -> 16r:16q */
1572
1573 sign = (insn & 0x100) != 0;
1574
1575 /* dest.l / src.w */
1576
1577 SRC_EA(env, src, OS_WORD, sign, NULL);
1578 destr = tcg_const_i32(REG(insn, 9));
1579 if (sign) {
1580 gen_helper_divsw(cpu_env, destr, src);
1581 } else {
1582 gen_helper_divuw(cpu_env, destr, src);
1583 }
1584 tcg_temp_free(destr);
1585
1586 set_cc_op(s, CC_OP_FLAGS);
1587 }
1588
/* 32-bit divide forms.  The extension word selects signedness
 * (bit 11) and the 64/32 quad form (bit 10, QUAD_MULDIV feature
 * only).  Division itself is done in helpers, which also handle
 * divide-by-zero and flag setting.  */
DISAS_INSN(divl)
{
    TCGv num, reg, den;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq 64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_const_i32(REG(ext, 12));
        reg = tcg_const_i32(REG(ext, 0));
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den);
        } else {
            gen_helper_divull(cpu_env, num, reg, den);
        }
        tcg_temp_free(reg);
        tcg_temp_free(num);
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq 32/32 -> 32q */
    /* divXl.l <EA>, Dr:Dq 32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_const_i32(REG(ext, 12));
    reg = tcg_const_i32(REG(ext, 0));
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den);
    } else {
        gen_helper_divul(cpu_env, num, reg, den);
    }
    tcg_temp_free(reg);
    tcg_temp_free(num);

    set_cc_op(s, CC_OP_FLAGS);
}
1637
/* Two-digit BCD addition: dest = dest10 + src10 + X, carry-corrected
 * without any branches.  Operands are byte values held in i32 temps;
 * bit 8 of the result carries the decimal carry (see bcd_flags).  */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /* dest10 = dest10 + src10 + X
     *
     * t1 = src
     * t2 = t1 + 0x066
     * t3 = t2 + dest + X
     * t4 = t2 ^ dest
     * t5 = t3 ^ t4
     * t6 = ~t5 & 0x110
     * t7 = (t6 >> 2) | (t6 >> 3)
     * return t3 - t7
     */

    /* t1 = (src + 0x066) + dest + X
     * = result with some possible exceeding 0x6
     */

    t0 = tcg_const_i32(0x066);
    tcg_gen_add_i32(t0, t0, src);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /* t0 = (src + 0x0066) ^ dest
     * = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /* extract the carries
     * t0 = t0 ^ t1
     * = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /* generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    /* dest = t0 * 3, i.e. 0x2 -> 0x6 per uncorrected digit */
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);
    tcg_temp_free(t0);

    /* remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
    tcg_temp_free(t1);
}
1698
/* Two-digit BCD subtraction: dest = dest10 - src10 - X, expressed as
 * a nines'-complement addition so the same carry-correction trick as
 * bcd_add applies.  */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /* dest10 = dest10 - src10 - X
     * = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /* t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);
    tcg_temp_free(t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
1751
/* Set condition codes from a raw BCD result VAL: Z accumulates the
 * low byte (callers flushed flags first; !Z is sticky), while C and X
 * come from bit 8, the decimal carry/borrow.  */
static void bcd_flags(TCGv val)
{
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    /* C = bit 8 of the result (decimal carry out).  */
    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1761
1762 DISAS_INSN(abcd_reg)
1763 {
1764 TCGv src;
1765 TCGv dest;
1766
1767 gen_flush_flags(s); /* !Z is sticky */
1768
1769 src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1770 dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1771 bcd_add(dest, src);
1772 gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1773
1774 bcd_flags(dest);
1775 }
1776
/* ABCD -(Ax),-(Ay): BCD add of two memory bytes with predecrement
 * addressing on both operands.  */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU);
    /* &addr captures the destination address so the store below hits
       the same (already decremented) location.  */
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU);

    bcd_add(dest, src);

    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);

    bcd_flags(dest);
}
1796
1797 DISAS_INSN(sbcd_reg)
1798 {
1799 TCGv src, dest;
1800
1801 gen_flush_flags(s); /* !Z is sticky */
1802
1803 src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1804 dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1805
1806 bcd_sub(dest, src);
1807
1808 gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1809
1810 bcd_flags(dest);
1811 }
1812
/* SBCD -(Ax),-(Ay): BCD subtract of two memory bytes with
 * predecrement addressing on both operands.  */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU);
    /* &addr captures the destination address for the store-back.  */
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU);

    bcd_sub(dest, src);

    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);

    bcd_flags(dest);
}
1832
/* NBCD <ea>: BCD negate, computed as 0 - operand - X via bcd_sub.  */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    dest = tcg_const_i32(0);
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);

    tcg_temp_free(dest);
}
1851
/* ADD/SUB in both directions.  Bit 14 selects add vs sub; bit 8
 * selects the direction: set means Dn op <ea> -> <ea>, clear means
 * <ea> op Dn -> Dn.  X/C are computed as the unsigned carry/borrow.  */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* destination is the EA: tmp = memory operand, src = Dn */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        /* destination is Dn: tmp = Dn, src = EA operand */
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* carry out iff the sum wrapped below an addend */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* borrow iff minuend < subtrahend (computed before the sub) */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    tcg_temp_free(dest);
}
1890
1891 /* Reverse the order of the bits in REG. */
1892 DISAS_INSN(bitrev)
1893 {
1894 TCGv reg;
1895 reg = DREG(insn, 0);
1896 gen_helper_bitrev(reg, reg);
1897 }
1898
1899 DISAS_INSN(bitop_reg)
1900 {
1901 int opsize;
1902 int op;
1903 TCGv src1;
1904 TCGv src2;
1905 TCGv tmp;
1906 TCGv addr;
1907 TCGv dest;
1908
1909 if ((insn & 0x38) != 0)
1910 opsize = OS_BYTE;
1911 else
1912 opsize = OS_LONG;
1913 op = (insn >> 6) & 3;
1914 SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1915
1916 gen_flush_flags(s);
1917 src2 = tcg_temp_new();
1918 if (opsize == OS_BYTE)
1919 tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1920 else
1921 tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1922
1923 tmp = tcg_const_i32(1);
1924 tcg_gen_shl_i32(tmp, tmp, src2);
1925 tcg_temp_free(src2);
1926
1927 tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1928
1929 dest = tcg_temp_new();
1930 switch (op) {
1931 case 1: /* bchg */
1932 tcg_gen_xor_i32(dest, src1, tmp);
1933 break;
1934 case 2: /* bclr */
1935 tcg_gen_andc_i32(dest, src1, tmp);
1936 break;
1937 case 3: /* bset */
1938 tcg_gen_or_i32(dest, src1, tmp);
1939 break;
1940 default: /* btst */
1941 break;
1942 }
1943 tcg_temp_free(tmp);
1944 if (op) {
1945 DEST_EA(env, insn, opsize, dest, &addr);
1946 }
1947 tcg_temp_free(dest);
1948 }
1949
1950 DISAS_INSN(sats)
1951 {
1952 TCGv reg;
1953 reg = DREG(insn, 0);
1954 gen_flush_flags(s);
1955 gen_helper_sats(reg, reg, QREG_CC_V);
1956 gen_logic_cc(s, reg, OS_LONG);
1957 }
1958
1959 static void gen_push(DisasContext *s, TCGv val)
1960 {
1961 TCGv tmp;
1962
1963 tmp = tcg_temp_new();
1964 tcg_gen_subi_i32(tmp, QREG_SP, 4);
1965 gen_store(s, OS_LONG, tmp, val);
1966 tcg_gen_mov_i32(QREG_SP, tmp);
1967 tcg_temp_free(tmp);
1968 }
1969
1970 static TCGv mreg(int reg)
1971 {
1972 if (reg < 8) {
1973 /* Dx */
1974 return cpu_dregs[reg];
1975 }
1976 /* Ax */
1977 return cpu_aregs[reg & 7];
1978 }
1979
/* MOVEM: move multiple registers to/from memory.  Bit 10 selects the
 * load direction, bit 6 the operand size, and the following extension
 * word is the register mask.  Only a subset of EA modes is legal, and
 * predecrement (store) / postincrement (load) update An at the end.  */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /* We want a bare copy of the address reg, without any pre-decrement
           adjustment, as gen_lea would provide. */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    /* Work on a private copy so An is only committed at the end.  */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_const_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register: load everything first, then commit, so
           a fault mid-way does not leave registers partially set.  */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1);
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
                tcg_temp_free(r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An)
               The mask is bit-reversed in this mode: bit 15 is D0.  */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /* M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp);
                        tcg_temp_free(tmp);
                    } else {
                        gen_store(s, opsize, addr, mreg(i));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }

    tcg_temp_free(incr);
    tcg_temp_free(addr);
}
2087
2088 DISAS_INSN(bitop_im)
2089 {
2090 int opsize;
2091 int op;
2092 TCGv src1;
2093 uint32_t mask;
2094 int bitnum;
2095 TCGv tmp;
2096 TCGv addr;
2097
2098 if ((insn & 0x38) != 0)
2099 opsize = OS_BYTE;
2100 else
2101 opsize = OS_LONG;
2102 op = (insn >> 6) & 3;
2103
2104 bitnum = read_im16(env, s);
2105 if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2106 if (bitnum & 0xfe00) {
2107 disas_undef(env, s, insn);
2108 return;
2109 }
2110 } else {
2111 if (bitnum & 0xff00) {
2112 disas_undef(env, s, insn);
2113 return;
2114 }
2115 }
2116
2117 SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2118
2119 gen_flush_flags(s);
2120 if (opsize == OS_BYTE)
2121 bitnum &= 7;
2122 else
2123 bitnum &= 31;
2124 mask = 1 << bitnum;
2125
2126 tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2127
2128 if (op) {
2129 tmp = tcg_temp_new();
2130 switch (op) {
2131 case 1: /* bchg */
2132 tcg_gen_xori_i32(tmp, src1, mask);
2133 break;
2134 case 2: /* bclr */
2135 tcg_gen_andi_i32(tmp, src1, ~mask);
2136 break;
2137 case 3: /* bset */
2138 tcg_gen_ori_i32(tmp, src1, mask);
2139 break;
2140 default: /* btst */
2141 break;
2142 }
2143 DEST_EA(env, insn, opsize, tmp, &addr);
2144 tcg_temp_free(tmp);
2145 }
2146 }
2147
/* Immediate arithmetic/logic: ORI/ANDI/SUBI/ADDI/EORI/CMPI selected
 * by bits 9-11.  CMPI (op 6) only sets flags and does not write the
 * destination back.  */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    /* Fetch the sign-extended immediate at the operand size.  */
    switch (opsize) {
    case OS_BYTE:
        im = tcg_const_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_const_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_const_i32(read_im32(env, s));
        break;
    default:
        abort();
    }
    SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        gen_logic_cc(s, dest, opsize);
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        gen_logic_cc(s, dest, opsize);
        break;
    case 2: /* subi */
        /* X = borrow, computed before the subtraction */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        /* X = carry: the sum wrapped below the addend */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        gen_logic_cc(s, dest, opsize);
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
    tcg_temp_free(im);
    if (op != 6) {
        DEST_EA(env, insn, opsize, dest, &addr);
    }
    tcg_temp_free(dest);
}
2211
/* CAS: compare-and-swap on a memory operand, implemented with a TCG
 * atomic cmpxchg.  Flags are the compare result of the loaded value
 * against Dc.  */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    TCGMemOp opc;

    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    cmp = gen_extend(DREG(ext, 0), opsize, 1);

    /* if <EA> == Dc then
     * <EA> = Du
     * Dc = <EA> (because <EA> == Dc)
     * else
     * Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    tcg_temp_free(load);

    /* gen_lea leaves the pre/post address-register update to us.  */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2275
/* CAS2.W: double compare-and-swap on two word locations; the heavy
 * lifting is done by the cas2w helper.  */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;
    TCGv regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     * (R1) = Du1
     * (R2) = Du2
     * else
     * Dc1 = (R1)
     * Dc2 = (R2)
     */

    /* Pack the four register numbers into one helper argument.  */
    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        /* No parallel-safe cas2w helper exists (contrast cas2l), so
           bail out to execute this instruction serially.  */
        gen_helper_exit_atomic(cpu_env);
    } else {
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2326
/* CAS2.L: double compare-and-swap on two long locations; a dedicated
 * parallel helper exists for multi-threaded execution.  */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     * (R1) = Du1
     * (R2) = Du2
     * else
     * Dc1 = (R1)
     * Dc2 = (R2)
     */

    /* Pack the four register numbers into one helper argument.  */
    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2376
2377 DISAS_INSN(byterev)
2378 {
2379 TCGv reg;
2380
2381 reg = DREG(insn, 0);
2382 tcg_gen_bswap32_i32(reg, reg);
2383 }
2384
/* MOVE/MOVEA: the workhorse data move.  Bits 12-13 encode the size;
 * MOVEA (destination mode 1) sign-extends into An and sets no flags,
 * while plain MOVE sets NZ and clears VC from the moved value.  */
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        /* The destination EA field has reg and mode swapped relative
           to a source EA; rebuild a standard mode/reg encoding.  */
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src, opsize);
    }
}
2421
/* NEGX: negate with extend, result = 0 - operand - X.  N holds the
 * result; Z accumulates (sticky), and V/C/X are derived below.  */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /* Perform subtract with borrow.
     * (X, N) = -(src + X);
     */

    z = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    tcg_temp_free(z);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Keep only the borrow bit of the 2-word subtraction.  */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2463
2464 DISAS_INSN(lea)
2465 {
2466 TCGv reg;
2467 TCGv tmp;
2468
2469 reg = AREG(insn, 9);
2470 tmp = gen_lea(env, s, insn, OS_LONG);
2471 if (IS_NULL_QREG(tmp)) {
2472 gen_addr_fault(s);
2473 return;
2474 }
2475 tcg_gen_mov_i32(reg, tmp);
2476 }
2477
2478 DISAS_INSN(clr)
2479 {
2480 int opsize;
2481 TCGv zero;
2482
2483 zero = tcg_const_i32(0);
2484
2485 opsize = insn_opsize(insn);
2486 DEST_EA(env, insn, opsize, zero, NULL);
2487 gen_logic_cc(s, zero, opsize);
2488 tcg_temp_free(zero);
2489 }
2490
2491 static TCGv gen_get_ccr(DisasContext *s)
2492 {
2493 TCGv dest;
2494
2495 gen_flush_flags(s);
2496 update_cc_op(s);
2497 dest = tcg_temp_new();
2498 gen_helper_get_ccr(dest, cpu_env);
2499 return dest;
2500 }
2501
2502 DISAS_INSN(move_from_ccr)
2503 {
2504 TCGv ccr;
2505
2506 ccr = gen_get_ccr(s);
2507 DEST_EA(env, insn, OS_WORD, ccr, NULL);
2508 }
2509
/* NEG: two's-complement negate of the destination operand.  */
DISAS_INSN(neg)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_neg_i32(dest, src1);
    set_cc_op(s, CC_OP_SUBB + opsize);
    gen_update_cc_add(dest, src1, opsize);
    /* Negation borrows (X set) exactly when the operand is nonzero. */
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}
2527
2528 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2529 {
2530 if (ccr_only) {
2531 tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2532 tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2533 tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2534 tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2535 tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2536 } else {
2537 gen_helper_set_sr(cpu_env, tcg_const_i32(val));
2538 }
2539 set_cc_op(s, CC_OP_FLAGS);
2540 }
2541
/* MOVE to SR/CCR.  Mode 0 takes the value from a data register, the
 * 0x3c form from an immediate word; any other EA is undefined for
 * this instruction.  */
static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                       int ccr_only)
{
    if ((insn & 0x38) == 0) {
        if (ccr_only) {
            gen_helper_set_ccr(cpu_env, DREG(insn, 0));
        } else {
            gen_helper_set_sr(cpu_env, DREG(insn, 0));
        }
        set_cc_op(s, CC_OP_FLAGS);
    } else if ((insn & 0x3f) == 0x3c) {
        uint16_t val;
        val = read_im16(env, s);
        gen_set_sr_im(s, val, ccr_only);
    } else {
        disas_undef(env, s, insn);
    }
}
2560
2561
/* move <ea>,CCR: update condition codes only. */
DISAS_INSN(move_to_ccr)
{
    gen_set_sr(env, s, insn, 1);
}
2566
/* not <ea>: bitwise complement in place; logic-op condition codes. */
DISAS_INSN(not)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_not_i32(dest, src1);
    DEST_EA(env, insn, opsize, dest, &addr);
    gen_logic_cc(s, dest, opsize);
}
2581
2582 DISAS_INSN(swap)
2583 {
2584 TCGv src1;
2585 TCGv src2;
2586 TCGv reg;
2587
2588 src1 = tcg_temp_new();
2589 src2 = tcg_temp_new();
2590 reg = DREG(insn, 0);
2591 tcg_gen_shli_i32(src1, reg, 16);
2592 tcg_gen_shri_i32(src2, reg, 16);
2593 tcg_gen_or_i32(reg, src1, src2);
2594 tcg_temp_free(src2);
2595 tcg_temp_free(src1);
2596 gen_logic_cc(s, reg, OS_LONG);
2597 }
2598
/* bkpt: raise a debug exception at the instruction address. */
DISAS_INSN(bkpt)
{
    gen_exception(s, s->pc - 2, EXCP_DEBUG);
}
2603
2604 DISAS_INSN(pea)
2605 {
2606 TCGv tmp;
2607
2608 tmp = gen_lea(env, s, insn, OS_LONG);
2609 if (IS_NULL_QREG(tmp)) {
2610 gen_addr_fault(s);
2611 return;
2612 }
2613 gen_push(s, tmp);
2614 }
2615
2616 DISAS_INSN(ext)
2617 {
2618 int op;
2619 TCGv reg;
2620 TCGv tmp;
2621
2622 reg = DREG(insn, 0);
2623 op = (insn >> 6) & 7;
2624 tmp = tcg_temp_new();
2625 if (op == 3)
2626 tcg_gen_ext16s_i32(tmp, reg);
2627 else
2628 tcg_gen_ext8s_i32(tmp, reg);
2629 if (op == 2)
2630 gen_partset_reg(OS_WORD, reg, tmp);
2631 else
2632 tcg_gen_mov_i32(reg, tmp);
2633 gen_logic_cc(s, tmp, OS_LONG);
2634 tcg_temp_free(tmp);
2635 }
2636
2637 DISAS_INSN(tst)
2638 {
2639 int opsize;
2640 TCGv tmp;
2641
2642 opsize = insn_opsize(insn);
2643 SRC_EA(env, tmp, opsize, 1, NULL);
2644 gen_logic_cc(s, tmp, opsize);
2645 }
2646
/* pulse (ColdFire): externally-visible debug pulse; no architectural
   effect, so implemented as a NOP. */
DISAS_INSN(pulse)
{
    /* Implemented as a NOP. */
}
2651
/* illegal: raise the illegal-instruction exception. */
DISAS_INSN(illegal)
{
    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
}
2656
/* tas <ea>: test-and-set the high bit of a byte.
   ??? This should be atomic (the architecture performs a
   read-modify-write bus cycle); here it is a plain load/store pair. */
DISAS_INSN(tas)
{
    TCGv dest;
    TCGv src1;
    TCGv addr;

    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1, OS_BYTE);   /* flags from the ORIGINAL value */
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
    tcg_temp_free(dest);
}
2671
/* mulu.l / muls.l <ea>,Dl — 32x32->32, or 32x32->64 into Dh:Dl when
   bit 10 of the extension word is set (QUAD_MULDIV feature only). */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        /* 64-bit form: Dh:Dl = <ea> * Dl. */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z reflects the whole 64-bit product. */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
        /* M68000 family: V is set when the product overflows 32 bits. */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /* The upper 32 bits of the product are discarded, so
           muls.l and mulu.l are functionally equivalent. */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2732
/* link An,#offset: push An, load An with the new SP, then add the
   (negative) frame offset to SP.  The (insn & 7) != 7 test skips the
   register update when An is SP itself, which was already pushed. */
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg);
    if ((insn & 7) != 7) {
        tcg_gen_mov_i32(reg, tmp);
    }
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
    tcg_temp_free(tmp);
}
2748
2749 DISAS_INSN(link)
2750 {
2751 int16_t offset;
2752
2753 offset = read_im16(env, s);
2754 gen_link(s, insn, offset);
2755 }
2756
2757 DISAS_INSN(linkl)
2758 {
2759 int32_t offset;
2760
2761 offset = read_im32(env, s);
2762 gen_link(s, insn, offset);
2763 }
2764
2765 DISAS_INSN(unlk)
2766 {
2767 TCGv src;
2768 TCGv reg;
2769 TCGv tmp;
2770
2771 src = tcg_temp_new();
2772 reg = AREG(insn, 0);
2773 tcg_gen_mov_i32(src, reg);
2774 tmp = gen_load(s, OS_LONG, src, 0);
2775 tcg_gen_mov_i32(reg, tmp);
2776 tcg_gen_addi_i32(QREG_SP, src, 4);
2777 tcg_temp_free(src);
2778 }
2779
/* nop: no operation. */
DISAS_INSN(nop)
{
}
2783
2784 DISAS_INSN(rtd)
2785 {
2786 TCGv tmp;
2787 int16_t offset = read_im16(env, s);
2788
2789 tmp = gen_load(s, OS_LONG, QREG_SP, 0);
2790 tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2791 gen_jmp(s, tmp);
2792 }
2793
2794 DISAS_INSN(rts)
2795 {
2796 TCGv tmp;
2797
2798 tmp = gen_load(s, OS_LONG, QREG_SP, 0);
2799 tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2800 gen_jmp(s, tmp);
2801 }
2802
/* jmp/jsr <ea>: jump (and optionally push the return address). */
DISAS_INSN(jump)
{
    TCGv tmp;

    /* Load the target address first to ensure correct exception
       behavior. */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    if ((insn & 0x40) == 0) {
        /* jsr: push the address of the next instruction. */
        gen_push(s, tcg_const_i32(s->pc));
    }
    gen_jmp(s, tmp);
}
2820
/* addq/subq #imm,<ea>: quick add/subtract of 1..8 (encoded 0 => 8).
   Bit 8 of the insn selects subtract; address-register destinations
   are always long and leave the condition codes untouched. */
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long. */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;
    }
    val = tcg_const_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    if ((insn & 0x38) == 0x08) {
        /* Don't update condition codes if the destination is an
           address register. */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        if (insn & 0x0100) {
            /* X = borrow: computed before the subtract consumes dest. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            /* X = carry: result < addend implies wraparound. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    tcg_temp_free(val);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}
2868
/* tpf (trapf, ColdFire): trap-never; skip over any extension words. */
DISAS_INSN(tpf)
{
    switch (insn & 7) {
    case 2: /* One extension word. */
        s->pc += 2;
        break;
    case 3: /* Two extension words. */
        s->pc += 4;
        break;
    case 4: /* No extension words. */
        break;
    default:
        disas_undef(env, s, insn);
    }
}
2884
/* bra/bsr/Bcc: 8-bit inline displacement, with 0x00 selecting a 16-bit
   and 0xff a 32-bit displacement in the following word(s). */
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;
    TCGLabel *l1;

    base = s->pc;   /* displacement is relative to pc after the opcode */
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc: branch on the inverted condition to fall through. */
        l1 = gen_new_label();
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc);
    } else {
        /* Unconditional branch. */
        gen_jmp_tb(s, 0, base + offset);
    }
}
2916
2917 DISAS_INSN(moveq)
2918 {
2919 tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2920 gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2921 }
2922
2923 DISAS_INSN(mvzs)
2924 {
2925 int opsize;
2926 TCGv src;
2927 TCGv reg;
2928
2929 if (insn & 0x40)
2930 opsize = OS_WORD;
2931 else
2932 opsize = OS_BYTE;
2933 SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
2934 reg = DREG(insn, 9);
2935 tcg_gen_mov_i32(reg, src);
2936 gen_logic_cc(s, src, opsize);
2937 }
2938
/* or Dn,<ea> / or <ea>,Dn: bit 8 selects the memory destination form. */
DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    reg = gen_extend(DREG(insn, 9), opsize, 0);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* <ea> is the destination. */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* Dn is the destination; only opsize bits are updated. */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    gen_logic_cc(s, dest, opsize);
    tcg_temp_free(dest);
}
2962
2963 DISAS_INSN(suba)
2964 {
2965 TCGv src;
2966 TCGv reg;
2967
2968 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
2969 reg = AREG(insn, 9);
2970 tcg_gen_sub_i32(reg, reg, src);
2971 }
2972
/* Generate code for subx: dest - src - X, leaving the result in
   QREG_CC_N and updating X/N/Z/V/C.  Z is sticky: it can only be
   cleared, never set, by this instruction. */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp;

    gen_flush_flags(s); /* compute old Z */

    /* Perform substract with borrow.
     * (X, N) = dest - (src + X);
     */

    tmp = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for substract. */

    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
    tcg_temp_free(tmp);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3004
/* subx Dy,Dx: register-to-register subtract with extend. */
DISAS_INSN(subx_reg)
{
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    src = gen_extend(DREG(insn, 0), opsize, 1);
    dest = gen_extend(DREG(insn, 9), opsize, 1);

    gen_subx(s, src, dest, opsize);

    /* Result was left in QREG_CC_N by gen_subx. */
    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
3020
3021 DISAS_INSN(subx_mem)
3022 {
3023 TCGv src;
3024 TCGv addr_src;
3025 TCGv dest;
3026 TCGv addr_dest;
3027 int opsize;
3028
3029 opsize = insn_opsize(insn);
3030
3031 addr_src = AREG(insn, 0);
3032 tcg_gen_subi_i32(addr_src, addr_src, opsize);
3033 src = gen_load(s, opsize, addr_src, 1);
3034
3035 addr_dest = AREG(insn, 9);
3036 tcg_gen_subi_i32(addr_dest, addr_dest, opsize);
3037 dest = gen_load(s, opsize, addr_dest, 1);
3038
3039 gen_subx(s, src, dest, opsize);
3040
3041 gen_store(s, opsize, addr_dest, QREG_CC_N);
3042 }
3043
3044 DISAS_INSN(mov3q)
3045 {
3046 TCGv src;
3047 int val;
3048
3049 val = (insn >> 9) & 7;
3050 if (val == 0)
3051 val = -1;
3052 src = tcg_const_i32(val);
3053 gen_logic_cc(s, src, OS_LONG);
3054 DEST_EA(env, insn, OS_LONG, src, NULL);
3055 tcg_temp_free(src);
3056 }
3057
3058 DISAS_INSN(cmp)
3059 {
3060 TCGv src;
3061 TCGv reg;
3062 int opsize;
3063
3064 opsize = insn_opsize(insn);
3065 SRC_EA(env, src, opsize, 1, NULL);
3066 reg = gen_extend(DREG(insn, 9), opsize, 1);
3067 gen_update_cc_cmp(s, reg, src, opsize);
3068 }
3069
3070 DISAS_INSN(cmpa)
3071 {
3072 int opsize;
3073 TCGv src;
3074 TCGv reg;
3075
3076 if (insn & 0x100) {
3077 opsize = OS_LONG;
3078 } else {
3079 opsize = OS_WORD;
3080 }
3081 SRC_EA(env, src, opsize, 1, NULL);
3082 reg = AREG(insn, 9);
3083 gen_update_cc_cmp(s, reg, src, OS_LONG);
3084 }
3085
/* cmpm (Ay)+,(Ax)+: compare memory to memory with postincrement. */
DISAS_INSN(cmpm)
{
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay. */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS);
    /* Post-increment load (mode 3) from Ax. */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS);

    gen_update_cc_cmp(s, dst, src, opsize);
}
3100
3101 DISAS_INSN(eor)
3102 {
3103 TCGv src;
3104 TCGv dest;
3105 TCGv addr;
3106 int opsize;
3107
3108 opsize = insn_opsize(insn);
3109
3110 SRC_EA(env, src, opsize, 0, &addr);
3111 dest = tcg_temp_new();
3112 tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3113 gen_logic_cc(s, dest, opsize);
3114 DEST_EA(env, insn, opsize, dest, &addr);
3115 tcg_temp_free(dest);
3116 }
3117
3118 static void do_exg(TCGv reg1, TCGv reg2)
3119 {
3120 TCGv temp = tcg_temp_new();
3121 tcg_gen_mov_i32(temp, reg1);
3122 tcg_gen_mov_i32(reg1, reg2);
3123 tcg_gen_mov_i32(reg2, temp);
3124 tcg_temp_free(temp);
3125 }
3126
DISAS_INSN(exg_dd)
{
    /* exchange Dx and Dy */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}
3132
DISAS_INSN(exg_aa)
{
    /* exchange Ax and Ay */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}
3138
DISAS_INSN(exg_da)
{
    /* exchange Dx and Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
3144
/* and Dn,<ea> / and <ea>,Dn: bit 8 selects the memory destination. */
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        /* <ea> is the destination. */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* Dn is the destination; only opsize bits are updated. */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
    tcg_temp_free(dest);
}
3169
3170 DISAS_INSN(adda)
3171 {
3172 TCGv src;
3173 TCGv reg;
3174
3175 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3176 reg = AREG(insn, 9);
3177 tcg_gen_add_i32(reg, reg, src);
3178 }
3179
/* Generate code for addx: src + dest + X, leaving the result in
   QREG_CC_N and updating X/N/Z/V/C.  Z is sticky: it can only be
   cleared, never set, by this instruction. */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp;

    gen_flush_flags(s); /* compute old Z */

    /* Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    tmp = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition. */

    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
    tcg_temp_free(tmp);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3210
/* addx Dy,Dx: register-to-register add with extend. */
DISAS_INSN(addx_reg)
{
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    dest = gen_extend(DREG(insn, 9), opsize, 1);
    src = gen_extend(DREG(insn, 0), opsize, 1);

    gen_addx(s, src, dest, opsize);

    /* Result was left in QREG_CC_N by gen_addx. */
    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
3226
/* addx -(Ay),-(Ax): memory-to-memory add with extend.  Both address
   registers are predecremented by the operand size in bytes. */
DISAS_INSN(addx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1);

    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1);

    gen_addx(s, src, dest, opsize);

    /* gen_addx leaves the result in QREG_CC_N. */
    gen_store(s, opsize, addr_dest, QREG_CC_N);
}
3249
/* Immediate-count shifts (asl/asr/lsl/lsr #imm,Dx); count 0 encodes 8. */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);

    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C/X = last bit shifted out. */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /* Note that ColdFire always clears V (done above),
           while M68000 sets if the most significant bit is changed at
           any time during the shift operation */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                /* QREG_CC_V still holds 0 here. */
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
                tcg_temp_free(t0);
            }
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3300
/* Register-count shifts (asl/asr/lsl/lsr Dy,Dx).  Done in 64 bits so
   that "the last bit shifted out" (carry) is always recoverable even
   for counts up to 63. */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /* Note that m68k truncates the shift count modulo 64, not 32.
       In addition, a 64-bit shift makes it easy to find "the last
       bit shifted out", for the carry flag. */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free.  */
        } else {
            TCGv zero = tcg_const_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            /* Force C=0 for a zero shift count. */
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
            tcg_temp_free(zero);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /* M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            TCGv_i64 tt = tcg_const_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            tcg_temp_free_i64(tt);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: pre-shift the value into the high half so the
           last bit shifted out lands in bit 31 of the low half. */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    tcg_temp_free(s32);
    tcg_temp_free_i64(s64);
    tcg_temp_free_i64(t64);

    /* Write back the result.  */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3395
/* Byte-sized immediate-count shift. */
DISAS_INSN(shift8_im)
{
    shift_im(s, insn, OS_BYTE);
}
3400
/* Word-sized immediate-count shift. */
DISAS_INSN(shift16_im)
{
    shift_im(s, insn, OS_WORD);
}
3405
/* Long-sized immediate-count shift. */
DISAS_INSN(shift_im)
{
    shift_im(s, insn, OS_LONG);
}
3410
/* Byte-sized register-count shift. */
DISAS_INSN(shift8_reg)
{
    shift_reg(s, insn, OS_BYTE);
}
3415
/* Word-sized register-count shift. */
DISAS_INSN(shift16_reg)
{
    shift_reg(s, insn, OS_WORD);
}
3420
/* Long-sized register-count shift. */
DISAS_INSN(shift_reg)
{
    shift_reg(s, insn, OS_LONG);
}
3425
/* Memory shifts: always word-sized, shift count is always 1. */
DISAS_INSN(shift_mem)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /* Note that ColdFire always clears V,
           while M68000 sets if the most significant bit is changed at
           any time during the shift operation */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            src = gen_extend(src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* C is the bit about to be shifted out (bit 0). */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3463
/* rol/ror: plain rotate (no X).  Sub-long sizes are replicated across
   the 32-bit value so a 32-bit rotate produces the right result, then
   sign-extended back for the flags. */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    /* C is the last bit rotated around (into bit 0 for left,
       into the sign bit for right). */
    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
3511
/* Set the condition codes after a rotate-through-X (roxl/roxr):
   N/Z from the sign-extended result, C and X both from the new X. */
static void rotate_x_flags(TCGv reg, TCGv X, int size)
{
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }
    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);
    tcg_gen_mov_i32(QREG_CC_X, X);
    tcg_gen_mov_i32(QREG_CC_C, X);
    tcg_gen_movi_i32(QREG_CC_V, 0);
}
3530
/* Result of rotate_x() is valid if 0 <= shift <= size */
/* Rotate-through-X for 8/16-bit operands: builds the (size+1)-bit
   rotation out of two shifts of the value plus one shift of X, and
   returns the new X bit in a fresh temporary (caller frees it). */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_const_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
        tcg_temp_free(zero);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift);  /* shx = size - shift */
    }

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_temp_free(shl);
    tcg_temp_free(shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);
    tcg_temp_free(shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_shr_i32(X, reg, sz);
    tcg_gen_andi_i32(X, X, 1);
    tcg_temp_free(sz);

    return X;
}
3577
/* Result of rotate32_x() is valid if 0 <= shift < 33 */
/* 32-bit rotate-through-X: performed as a 64-bit rotate of the 33-bit
   value [reg:X] (padded), then X and the result are extracted.  A zero
   shift leaves both the register and X untouched.  Returns the new X
   in a fresh temporary (caller frees it). */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);
        tcg_temp_free_i64(shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);
        tcg_temp_free_i64(shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    tcg_temp_free_i64(t0);
    tcg_gen_or_i32(lo, lo, hi);
    tcg_temp_free(hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_const_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
    tcg_temp_free(zero);
    tcg_temp_free(lo);

    return X;
}
3644
/* rol/ror/roxl/roxr #imm,Dx (long); count 0 encodes 8. */
DISAS_INSN(rotate_im)
{
    TCGv shift;
    int tmp;
    int left = (insn & 0x100);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        /* Plain rotate (sets all flags itself). */
        rotate(DREG(insn, 0), shift, left, 32);
    } else {
        /* Rotate through X. */
        TCGv X = rotate32_x(DREG(insn, 0), shift, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);

    set_cc_op(s, CC_OP_FLAGS);
}
3668
/* rol/ror/roxl/roxr #imm,Dx (byte); count 0 encodes 8. */
DISAS_INSN(rotate8_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 8);
    } else {
        TCGv X = rotate_x(reg, shift, left, 8);
        rotate_x_flags(reg, X, 8);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    /* Only the low byte of Dx is written back. */
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3695
/* rol/ror/roxl/roxr #imm,Dx (word); count 0 encodes 8. */
DISAS_INSN(rotate16_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 16);
    } else {
        TCGv X = rotate_x(reg, shift, left, 16);
        rotate_x_flags(reg, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    /* Only the low word of Dx is written back. */
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3721
/* rol/ror/roxl/roxr Dy,Dx (long).  The architectural count is taken
   modulo 64; plain rotates then reduce it mod 32, rotate-through-X
   mod 33. */
DISAS_INSN(rotate_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    set_cc_op(s, CC_OP_FLAGS);
}
3755
/* rol/ror/roxl/roxr Dy,Dx (byte).  Count mod 64, then mod 8 for plain
   rotates or mod 9 for rotate-through-X. */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    /* Only the low byte of Dx is written back. */
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3790
/* rol/ror/roxl/roxr Dy,Dx (word).  Count mod 64, then mod 16 for plain
   rotates or mod 17 for rotate-through-X. */
DISAS_INSN(rotate16_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    /* Only the low word of Dx is written back. */
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3825
/* Rotate with a memory operand: always word-sized with an implicit
 * count of 1.  Bit 9 of the insn selects plain rotate vs rotate
 * through the X flag; the result is written back to the same EA.
 */
DISAS_INSN(rotate_mem)
{
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);

    SRC_EA(env, src, OS_WORD, 0, &addr);

    /* Memory rotates always use a shift count of one. */
    shift = tcg_const_i32(1);
    if (insn & 0x0200) {
        rotate(src, shift, left, 16);
    } else {
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3847
/* BFEXTU/BFEXTS with a data-register source: extract a bit-field of Dn
 * into the destination register, unsigned (BFEXTU) or sign-extended
 * (BFEXTS).  QREG_CC_N always receives the sign-extended field so the
 * flags can be derived from it.
 */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;  /* encoded 0 means 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /* In general, we're going to rotate the field so that it's at the
       top of the word and then right-shift by the complement of the
       width to extend the field.  */
    if (ext & 0x20) {
        /* Variable width.  */
        if (ext & 0x800) {
            /* Variable offset.  */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = (32 - width) mod 32, computed as -width & 31 */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
        tcg_temp_free(shift);
    } else {
        /* Immediate width.  */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /* Immediate offset.  If the field doesn't wrap around the
               end of the word, rely on (s)extract completely.  */
            if (pos < 0) {
                /* Field wraps: rotate it into the top of the word. */
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    tcg_temp_free(tmp);
    set_cc_op(s, CC_OP_LOGIC);
}
3912
/* BFEXTU/BFEXTS with a memory source, implemented via helpers.  The
 * unsigned helper packs two results in one i64: the extracted value in
 * the low half and the value for QREG_CC_N in the high half.
 */
DISAS_INSN(bfext_mem)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width and offset each come from a data register or a 5-bit
       immediate in the extension word. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
        tcg_temp_free_i64(tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the constants we allocated; the register cases alias
       guest registers and must not be freed. */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
3955
/* Bit-field ops (BFCHG/BFCLR/BFSET/BFFFO/BFTST) on a data register.
 * QREG_CC_N is set to the field rotated to the top of the word (for
 * the flags), and 'mask' is built with zeros at the field positions so
 * each update is a single logical op.  For bfffo, tofs/tlen carry the
 * runtime offset and length to the helper.
 */
DISAS_INSN(bfop_reg)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;  /* encoded 0 means 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs, tlen;

    TCGV_UNUSED(tofs);
    TCGV_UNUSED(tlen);
    if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
        tofs = tcg_temp_new();
        tlen = tcg_temp_new();
    }

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset.  */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* The field wraps past bit 0: rotate instead of shift. */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
        mask = tcg_const_i32(ror32(maski, ofs));
        if (!TCGV_IS_UNUSED(tofs)) {
            tcg_gen_movi_i32(tofs, ofs);
            tcg_gen_movi_i32(tlen, len);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            mask = tcg_const_i32(0x7fffffffu);
            tcg_gen_shr_i32(mask, mask, tmp);
            if (!TCGV_IS_UNUSED(tlen)) {
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            mask = tcg_const_i32(0x7fffffffu >> (len - 1));
            if (!TCGV_IS_UNUSED(tlen)) {
                tcg_gen_movi_i32(tlen, len);
            }
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (!TCGV_IS_UNUSED(tofs)) {
                tcg_gen_mov_i32(tofs, tmp);
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (!TCGV_IS_UNUSED(tofs)) {
                tcg_gen_movi_i32(tofs, ofs);
            }
        }
        tcg_temp_free(tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);  /* invert bits where mask is 0 */
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);  /* clear bits where mask is 0 */
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        tcg_temp_free(tlen);
        tcg_temp_free(tofs);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);  /* set bits where mask is 0 */
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do.  */
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(mask);
}
4048
/* Bit-field ops (BFCHG/BFCLR/BFSET/BFFFO/BFTST) with a memory operand,
 * dispatched to per-op helpers.  All helpers return the CC_N value;
 * bfffo additionally returns the found bit number, packed with CC_N in
 * one i64.
 */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width and offset each come from a data register or a 5-bit
       immediate in the extension word. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
        /* low half -> result register, high half -> CC_N */
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        tcg_temp_free_i64(t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the constants we allocated; the register cases alias
       guest registers and must not be freed. */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
4103
/* BFINS into a data register: insert the low 'len' bits of the source
 * register into the named bit-field of Dn.  QREG_CC_N gets the source
 * value shifted to the top of the word for flag computation.
 */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;  /* encoded 0 means 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Field wraps past bit 0: mask the source, rotate it into
               position, and merge it manually. */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        tcg_gen_andi_i32(rot, rot, 31);
        /* Rotate both the keep-mask and the field into place, then merge. */
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);

        tcg_temp_free(rot);
        tcg_temp_free(mask);
    }
    tcg_temp_free(tmp);
}
4177
/* BFINS with a memory destination, implemented via a helper which also
 * returns the CC_N value.
 */
DISAS_INSN(bfins_mem)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width and offset each come from a data register or a 5-bit
       immediate in the extension word. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the constants we allocated; the register cases alias
       guest registers and must not be freed. */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
4211
4212 DISAS_INSN(ff1)
4213 {
4214 TCGv reg;
4215 reg = DREG(insn, 0);
4216 gen_logic_cc(s, reg, OS_LONG);
4217 gen_helper_ff1(reg, reg);
4218 }
4219
4220 static TCGv gen_get_sr(DisasContext *s)
4221 {
4222 TCGv ccr;
4223 TCGv sr;
4224
4225 ccr = gen_get_ccr(s);
4226 sr = tcg_temp_new();
4227 tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
4228 tcg_gen_or_i32(sr, sr, ccr);
4229 return sr;
4230 }
4231
/* ColdFire STRLDSR: atomically push SR and load a new SR from an
 * immediate.  The second opcode word must be 0x46FC (the move-to-SR
 * opcode); otherwise the sequence is unsupported.  Privileged, and the
 * new SR must keep the supervisor bit set.
 */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    /* PC of this instruction, for exception reporting. */
    addr = s->pc - 2;
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_UNSUPPORTED);
        return;
    }
    ext = read_im16(env, s);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
}
4251
/* MOVE from SR: privileged, except when the M68000 feature is set
 * (where the check is skipped).  Stores the composed SR to the EA.
 */
DISAS_INSN(move_from_sr)
{
    TCGv sr;

    if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    DEST_EA(env, insn, OS_WORD, sr, NULL);
}
4263
/* MOVE to SR: privileged.  Loading SR can change the privilege level,
 * so force a TB lookup afterwards.
 */
DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_set_sr(env, s, insn, 0);
    gen_lookup_tb(s);
}
4273
/* MOVE USP,An: privileged read of the user stack pointer into An. */
DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4283
/* MOVE An,USP: privileged write of An into the user stack pointer. */
DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4293
/* HALT: raise the halt-instruction exception at the following PC. */
DISAS_INSN(halt)
{
    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
4298
/* STOP #imm: privileged.  Load SR from the immediate word, mark the
 * CPU halted, and raise EXCP_HLT so execution stops until an interrupt.
 */
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
4314
/* RTE: privileged.  The actual return-from-exception work is done in
 * the EXCP_RTE exception handler.
 */
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->pc - 2, EXCP_RTE);
}
4323
4324 DISAS_INSN(movec)
4325 {
4326 uint16_t ext;
4327 TCGv reg;
4328
4329 if (IS_USER(s)) {
4330 gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4331 return;
4332 }
4333
4334 ext = read_im16(env, s);
4335
4336 if (ext & 0x8000) {
4337 reg = AREG(ext, 12);
4338 } else {
4339 reg = DREG(ext, 12);
4340 }
4341 gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4342 gen_lookup_tb(s);
4343 }
4344
/* INTOUCH: privileged instruction-cache touch.  QEMU has no icache to
 * prefetch, so only the privilege check is performed.
 */
DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op.  */
}
4353
/* CPUSHL: privileged cache push/invalidate.  QEMU does not model the
 * caches, so only the privilege check is performed.
 */
DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
4362
/* WDDATA: always raises a privilege violation here. */
DISAS_INSN(wddata)
{
    gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
}
4367
/* WDEBUG: privileged; not implemented, aborts emulation if reached. */
DISAS_INSN(wdebug)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    if (IS_USER(s)) {
        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(CPU(cpu), "WDEBUG not implemented");
}
4379
/* TRAP #n: raise one of the 16 trap exceptions, vector from the low
 * 4 bits of the opcode.
 */
DISAS_INSN(trap)
{
    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
}
4384
/* Read one FP control register (FPIAR/FPSR/FPCR) into 'res'.  FPIAR is
 * not modelled and always reads as zero.  NOTE(review): an unknown
 * 'reg' leaves 'res' untouched — callers are expected to pass only the
 * three valid values.
 */
static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        tcg_gen_movi_i32(res, 0);
        break;
    case M68K_FPSR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
        break;
    }
}
4399
/* Write 'val' into one FP control register.  Writes to FPIAR are
 * discarded (the register is not modelled); FPCR goes through a helper
 * (presumably to apply side effects of the new control value — see
 * gen_helper_set_fpcr).
 */
static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        break;
    case M68K_FPSR:
        tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        gen_helper_set_fpcr(cpu_env, val);
        break;
    }
}
4413
4414 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4415 {
4416 int index = IS_USER(s);
4417 TCGv tmp;
4418
4419 tmp = tcg_temp_new();
4420 gen_load_fcr(s, tmp, reg);
4421 tcg_gen_qemu_st32(tmp, addr, index);
4422 tcg_temp_free(tmp);
4423 }
4424
4425 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4426 {
4427 int index = IS_USER(s);
4428 TCGv tmp;
4429
4430 tmp = tcg_temp_new();
4431 tcg_gen_qemu_ld32u(tmp, addr, index);
4432 gen_store_fcr(s, tmp, reg);
4433 tcg_temp_free(tmp);
4434 }
4435
4436
/* FMOVE/FMOVEM to or from the FP control registers (FPCR/FPSR/FPIAR).
 * 'mask' selects which of the three registers take part; is_write
 * means "to memory/register" here.  Register modes (Dn, An) only allow
 * a single control register; memory modes transfer each selected
 * register in turn, with -(An) (mode 4) stores walking downwards and
 * (An)+ (mode 3) updating An afterwards.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* Exactly one control register must be selected. */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    default:
        break;
    }

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    /* Copy the address so we can advance it without clobbering An. */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /* mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* -(An): store the selected registers at decreasing addresses,
           then write the final address back to An. */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        /* All other memory modes walk upwards; (An)+ (mode 3) also
           writes the final address back to An. */
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        if (mode == 3) {
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
    tcg_temp_free_i32(addr);
}
4519
/* FMOVEM of floating-point data registers.  The register list comes
 * either from a data register (dynamic) or from the extension word
 * (static).  Predecrement addressing is only valid for stores; the
 * helper returns the final address, written back to An for the
 * -(An) and (An)+ modes.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* tmp holds the 8-bit register mask on entry and the final address
       on return from the helper. */
    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /* predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
            }
        }
    }
    /* Write back the updated address for -(An) and (An)+ EA modes. */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
    tcg_temp_free(tmp);
}
4579
4580 /* ??? FP exceptions are not implemented. Most exceptions are deferred until
4581 immediately before the next FP instruction is executed. */
/* Main FPU instruction decode.  Bits 15-13 of the extension word pick
 * the instruction class (register op, fmove out, control-register
 * moves, fmovem); the remaining register-to-register/EA operations
 * dispatch on the 7-bit opmode.  Every arithmetic result is run
 * through ftst to update the FP condition codes.
 */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int opmode;
    int opsize;
    TCGv_ptr cpu_src, cpu_dest;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0:
        break;
    case 1:
        goto undef;
    case 2:
        if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
            /* fmovecr: load a constant from the on-chip ROM, indexed
               by opmode. */
            TCGv rom_offset = tcg_const_i32(opmode);
            cpu_dest = gen_fp_ptr(REG(ext, 7));
            gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
            tcg_temp_free_ptr(cpu_dest);
            tcg_temp_free(rom_offset);
            return;
        }
        break;
    case 3: /* fmove out */
        cpu_src = gen_fp_ptr(REG(ext, 7));
        opsize = ext_opsize(ext, 10);
        if (gen_ea_fp(env, s, insn, opsize, cpu_src, EA_STORE) == -1) {
            gen_addr_fault(s);
        }
        gen_helper_ftst(cpu_env, cpu_src);
        tcg_temp_free_ptr(cpu_src);
        return;
    case 4: /* fmove to control register.  */
    case 5: /* fmove from control register.  */
        gen_op_fmove_fcr(env, s, insn, ext);
        return;
    case 6: /* fmovem */
    case 7:
        if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
            goto undef;
        }
        gen_op_fmovem(env, s, insn, ext);
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        opsize = ext_opsize(ext, 10);
        cpu_src = gen_fp_result_ptr();
        if (gen_ea_fp(env, s, insn, opsize, cpu_src, EA_LOADS) == -1) {
            gen_addr_fault(s);
            return;
        }
    } else {
        /* Source register.  */
        opsize = OS_EXTENDED;
        cpu_src = gen_fp_ptr(REG(ext, 10));
    }
    cpu_dest = gen_fp_ptr(REG(ext, 7));
    switch (opmode) {
    case 0: /* fmove */
        gen_fp_move(cpu_dest, cpu_src);
        break;
    case 0x40: /* fsmove */
        gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x44: /* fdmove */
        gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
        break;
    case 1: /* fint */
        gen_helper_firound(cpu_env, cpu_dest, cpu_src);
        break;
    case 3: /* fintrz */
        gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
        break;
    case 4: /* fsqrt */
        gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x41: /* fssqrt */
        gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x45: /* fdsqrt */
        gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x18: /* fabs */
        gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x58: /* fsabs */
        gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5c: /* fdabs */
        gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1a: /* fneg */
        gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5a: /* fsneg */
        gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5e: /* fdneg */
        gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x20: /* fdiv */
        gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x60: /* fsdiv */
        gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x64: /* fddiv */
        gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x22: /* fadd */
        gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x62: /* fsadd */
        gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x66: /* fdadd */
        gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x23: /* fmul */
        gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x63: /* fsmul */
        gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x67: /* fdmul */
        gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x24: /* fsgldiv */
        gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x27: /* fsglmul */
        gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x28: /* fsub */
        gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x68: /* fssub */
        gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x6c: /* fdsub */
        gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x38: /* fcmp */
        gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
        return;
    case 0x3a: /* ftst */
        gen_helper_ftst(cpu_env, cpu_src);
        return;
    default:
        goto undef;
    }
    tcg_temp_free_ptr(cpu_src);
    /* Set the FP condition codes from the result. */
    gen_helper_ftst(cpu_env, cpu_dest);
    tcg_temp_free_ptr(cpu_dest);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
4745
/* Build a DisasCompare for the 6-bit FP condition code 'cond'.  Each
 * predicate at cond and cond+16 (the "signaling" variant) is decoded
 * identically here; raising BSUN for the signaling forms is still
 * TODO.  The per-case comments give the predicate in terms of the
 * FPSR condition-code bits (A, Z, N).
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;

    c->g1 = 1;
    c->v2 = tcg_const_i32(0);
    c->g2 = 0;
    /* TODO: Raise BSUN exception.  */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->v1 = c->v2;
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr,
                         FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_EQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Align the A bit with the N bit, so A || N reduces to one
           test in the N position. */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Shift A into the N position so andc computes N && !A. */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_EQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_NE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Align the Z bit with the N bit, so N || Z reduces to one
           test in the N position. */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Shift Z into the N position so andc computes N && !Z. */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->v1 = c->v2;
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
    tcg_temp_free(fpsr);
}
4885
4886 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
4887 {
4888 DisasCompare c;
4889
4890 gen_fcc_cond(&c, s, cond);
4891 tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
4892 free_cond(&c);
4893 }
4894
/* FBcc: FP conditional branch.  The displacement is a sign-extended
 * 16-bit word, or 32 bits when bit 6 of the opcode is set; it is
 * relative to the address following the opcode word ('base').
 */
DISAS_INSN(fbcc)
{
    uint32_t offset;
    uint32_t base;
    TCGLabel *l1;

    base = s->pc;
    offset = (int16_t)read_im16(env, s);
    if (insn & (1 << 6)) {
        /* 32-bit displacement: high word already read, append low word. */
        offset = (offset << 16) | read_im16(env, s);
    }

    l1 = gen_new_label();
    update_cc_op(s);
    gen_fjmpcc(s, insn & 0x3f, l1);
    gen_jmp_tb(s, 0, s->pc);       /* fall through: not taken */
    gen_set_label(l1);
    gen_jmp_tb(s, 1, base + offset);  /* branch taken */
}
4914
/* FScc: set the destination byte to all ones if the FP condition
 * holds, all zeros otherwise.
 */
DISAS_INSN(fscc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;
    uint16_t ext;

    ext = read_im16(env, s);
    cond = ext & 0x3f;
    gen_fcc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
    free_cond(&c);

    /* Convert the 0/1 setcond result to 0x00/0xff. */
    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
    tcg_temp_free(tmp);
}
4934
/* FRESTORE: not implemented, aborts emulation if reached. */
DISAS_INSN(frestore)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    /* TODO: Implement frestore.  */
    cpu_abort(CPU(cpu), "FRESTORE not implemented");
}
4942
/* FSAVE: not implemented, aborts emulation if reached. */
DISAS_INSN(fsave)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    /* TODO: Implement fsave.  */
    cpu_abort(CPU(cpu), "FSAVE not implemented");
}
4950
4951 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
4952 {
4953 TCGv tmp = tcg_temp_new();
4954 if (s->env->macsr & MACSR_FI) {
4955 if (upper)
4956 tcg_gen_andi_i32(tmp, val, 0xffff0000);
4957 else
4958 tcg_gen_shli_i32(tmp, val, 16);
4959 } else if (s->env->macsr & MACSR_SU) {
4960 if (upper)
4961 tcg_gen_sari_i32(tmp, val, 16);
4962 else
4963 tcg_gen_ext16s_i32(tmp, val);
4964 } else {
4965 if (upper)
4966 tcg_gen_shri_i32(tmp, val, 16);
4967 else
4968 tcg_gen_ext16u_i32(tmp, val);
4969 }
4970 return tmp;
4971 }
4972
/* Clear the per-operation MACSR flags (V, Z, N, EV) before a MAC op. */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
4978
/* Translate the ColdFire EMAC multiply-accumulate family: MAC/MSAC,
 * optionally combined with a parallel memory load, and (on EMAC_B cores)
 * the dual-accumulate form that updates a second accumulator from the
 * same product.  The multiply itself is done in helpers; saturation and
 * flag computation are likewise deferred to helpers.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Lazily allocate the 64-bit product temporary, shared by all MAC
       insns in this TB.  */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    /* Accumulator index: low bit from insn bit 7, high bit from ext bit 4.  */
    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    /* Dual-accumulate form: load variant with a second accumulator
       selected in the low extension bits.  */
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs.  */
        loadval = gen_load(s, OS_LONG, addr, 0);

        /* The load form encodes the low accumulator bit inverted.  */
        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  Extract the operand halves selected by ext bits 7/6.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Integer modes apply the scale factor from ext bits 10:9
           as a shift of the product.  */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* Insn bit 8 selects subtract (MSAC) vs add (MAC).  */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    /* Saturate the accumulator according to the current operating mode.  */
    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

    if (insn & 0x30) {
        /* Complete the parallel load: write the loaded value to its
           destination register, then do address-register writeback.  */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value? */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
5142
5143 DISAS_INSN(from_mac)
5144 {
5145 TCGv rx;
5146 TCGv_i64 acc;
5147 int accnum;
5148
5149 rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5150 accnum = (insn >> 9) & 3;
5151 acc = MACREG(accnum);
5152 if (s->env->macsr & MACSR_FI) {
5153 gen_helper_get_macf(rx, cpu_env, acc);
5154 } else if ((s->env->macsr & MACSR_OMC) == 0) {
5155 tcg_gen_extrl_i64_i32(rx, acc);
5156 } else if (s->env->macsr & MACSR_SU) {
5157 gen_helper_get_macs(rx, acc);
5158 } else {
5159 gen_helper_get_macu(rx, acc);
5160 }
5161 if (insn & 0x40) {
5162 tcg_gen_movi_i64(acc, 0);
5163 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5164 }
5165 }
5166
5167 DISAS_INSN(move_mac)
5168 {
5169 /* FIXME: This can be done without a helper. */
5170 int src;
5171 TCGv dest;
5172 src = insn & 3;
5173 dest = tcg_const_i32((insn >> 9) & 3);
5174 gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
5175 gen_mac_clear_flags();
5176 gen_helper_mac_set_flags(cpu_env, dest);
5177 }
5178
5179 DISAS_INSN(from_macsr)
5180 {
5181 TCGv reg;
5182
5183 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5184 tcg_gen_mov_i32(reg, QREG_MACSR);
5185 }
5186
5187 DISAS_INSN(from_mask)
5188 {
5189 TCGv reg;
5190 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5191 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5192 }
5193
5194 DISAS_INSN(from_mext)
5195 {
5196 TCGv reg;
5197 TCGv acc;
5198 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5199 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5200 if (s->env->macsr & MACSR_FI)
5201 gen_helper_get_mac_extf(reg, cpu_env, acc);
5202 else
5203 gen_helper_get_mac_exti(reg, cpu_env, acc);
5204 }
5205
5206 DISAS_INSN(macsr_to_ccr)
5207 {
5208 TCGv tmp = tcg_temp_new();
5209 tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
5210 gen_helper_set_sr(cpu_env, tmp);
5211 tcg_temp_free(tmp);
5212 set_cc_op(s, CC_OP_FLAGS);
5213 }
5214
5215 DISAS_INSN(to_mac)
5216 {
5217 TCGv_i64 acc;
5218 TCGv val;
5219 int accnum;
5220 accnum = (insn >> 9) & 3;
5221 acc = MACREG(accnum);
5222 SRC_EA(env, val, OS_LONG, 0, NULL);
5223 if (s->env->macsr & MACSR_FI) {
5224 tcg_gen_ext_i32_i64(acc, val);
5225 tcg_gen_shli_i64(acc, acc, 8);
5226 } else if (s->env->macsr & MACSR_SU) {
5227 tcg_gen_ext_i32_i64(acc, val);
5228 } else {
5229 tcg_gen_extu_i32_i64(acc, val);
5230 }
5231 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5232 gen_mac_clear_flags();
5233 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
5234 }
5235
5236 DISAS_INSN(to_macsr)
5237 {
5238 TCGv val;
5239 SRC_EA(env, val, OS_LONG, 0, NULL);
5240 gen_helper_set_macsr(cpu_env, val);
5241 gen_lookup_tb(s);
5242 }
5243
5244 DISAS_INSN(to_mask)
5245 {
5246 TCGv val;
5247 SRC_EA(env, val, OS_LONG, 0, NULL);
5248 tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5249 }
5250
5251 DISAS_INSN(to_mext)
5252 {
5253 TCGv val;
5254 TCGv acc;
5255 SRC_EA(env, val, OS_LONG, 0, NULL);
5256 acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5257 if (s->env->macsr & MACSR_FI)
5258 gen_helper_set_mac_extf(cpu_env, val, acc);
5259 else if (s->env->macsr & MACSR_SU)
5260 gen_helper_set_mac_exts(cpu_env, val, acc);
5261 else
5262 gen_helper_set_mac_extu(cpu_env, val, acc);
5263 }
5264
5265 static disas_proc opcode_table[65536];
5266
5267 static void
5268 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5269 {
5270 int i;
5271 int from;
5272 int to;
5273
5274 /* Sanity check. All set bits must be included in the mask. */
5275 if (opcode & ~mask) {
5276 fprintf(stderr,
5277 "qemu internal error: bogus opcode definition %04x/%04x\n",
5278 opcode, mask);
5279 abort();
5280 }
5281 /* This could probably be cleverer. For now just optimize the case where
5282 the top bits are known. */
5283 /* Find the first zero bit in the mask. */
5284 i = 0x8000;
5285 while ((i & mask) != 0)
5286 i >>= 1;
5287 /* Iterate over all combinations of this and lower bits. */
5288 if (i == 0)
5289 i = 1;
5290 else
5291 i <<= 1;
5292 from = opcode & ~(i - 1);
5293 to = from + i;
5294 for (i = from; i < to; i++) {
5295 if ((i & mask) == opcode)
5296 opcode_table[i] = proc;
5297 }
5298 }
5299
/* Register m68k opcode handlers.  Order is important:
   later registrations override earlier ones, which is relied on below to
   first blanket-register a region and then carve out specific encodings.  */
void register_m68k_insns (CPUM68KState *env)
{
    /* Build the opcode table only once to avoid
       multithreading issues.  */
    if (opcode_table[0] != NULL) {
        return;
    }

    /* use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    /* Catch-all: any opcode not overridden below goes to disas_undef.  */
    BASE(undef, 0000, 0000);
    INSN(arith_im, 0080, fff8, CF_ISA_A);
    INSN(arith_im, 0000, ff00, M68000);
    INSN(undef, 00c0, ffc0, M68000);
    INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(arith_im, 0280, fff8, CF_ISA_A);
    INSN(arith_im, 0200, ff00, M68000);
    INSN(undef, 02c0, ffc0, M68000);
    INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0480, fff8, CF_ISA_A);
    INSN(arith_im, 0400, ff00, M68000);
    INSN(undef, 04c0, ffc0, M68000);
    INSN(arith_im, 0600, ff00, M68000);
    INSN(undef, 06c0, ffc0, M68000);
    INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0680, fff8, CF_ISA_A);
    INSN(arith_im, 0c00, ff38, CF_ISA_A);
    INSN(arith_im, 0c00, ff00, M68000);
    BASE(bitop_im, 0800, ffc0);
    BASE(bitop_im, 0840, ffc0);
    BASE(bitop_im, 0880, ffc0);
    BASE(bitop_im, 08c0, ffc0);
    INSN(arith_im, 0a80, fff8, CF_ISA_A);
    INSN(arith_im, 0a00, ff00, M68000);
    INSN(cas, 0ac0, ffc0, CAS);
    INSN(cas, 0cc0, ffc0, CAS);
    INSN(cas, 0ec0, ffc0, CAS);
    INSN(cas2w, 0cfc, ffff, CAS);
    INSN(cas2l, 0efc, ffff, CAS);
    BASE(move, 1000, f000);
    BASE(move, 2000, f000);
    BASE(move, 3000, f000);
    INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
    INSN(negx, 4080, fff8, CF_ISA_A);
    INSN(negx, 4000, ff00, M68000);
    INSN(undef, 40c0, ffc0, M68000);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68000);
    BASE(lea, 41c0, f1c0);
    BASE(clr, 4200, ff00);
    BASE(undef, 42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68000);
    INSN(neg, 4480, fff8, CF_ISA_A);
    INSN(neg, 4400, ff00, M68000);
    INSN(undef, 44c0, ffc0, M68000);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not, 4680, fff8, CF_ISA_A);
    INSN(not, 4600, ff00, M68000);
    INSN(undef, 46c0, ffc0, M68000);
    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
    INSN(nbcd, 4800, ffc0, M68000);
    INSN(linkl, 4808, fff8, M68000);
    BASE(pea, 4840, ffc0);
    BASE(swap, 4840, fff8);
    INSN(bkpt, 4848, fff8, BKPT);
    INSN(movem, 48d0, fbf8, CF_ISA_A);
    INSN(movem, 48e8, fbf8, CF_ISA_A);
    INSN(movem, 4880, fb80, M68000);
    BASE(ext, 4880, fff8);
    BASE(ext, 48c0, fff8);
    BASE(ext, 49c0, fff8);
    BASE(tst, 4a00, ff00);
    INSN(tas, 4ac0, ffc0, CF_ISA_B);
    INSN(tas, 4ac0, ffc0, M68000);
    INSN(halt, 4ac8, ffff, CF_ISA_A);
    INSN(pulse, 4acc, ffff, CF_ISA_A);
    BASE(illegal, 4afc, ffff);
    INSN(mull, 4c00, ffc0, CF_ISA_A);
    INSN(mull, 4c00, ffc0, LONG_MULDIV);
    INSN(divl, 4c40, ffc0, CF_ISA_A);
    INSN(divl, 4c40, ffc0, LONG_MULDIV);
    INSN(sats, 4c80, fff8, CF_ISA_B);
    BASE(trap, 4e40, fff0);
    BASE(link, 4e50, fff8);
    BASE(unlk, 4e58, fff8);
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    BASE(nop, 4e71, ffff);
    BASE(stop, 4e72, ffff);
    BASE(rte, 4e73, ffff);
    INSN(rtd, 4e74, ffff, RTD);
    BASE(rts, 4e75, ffff);
    INSN(movec, 4e7b, ffff, CF_ISA_A);
    BASE(jump, 4e80, ffc0);
    BASE(jump, 4ec0, ffc0);
    INSN(addsubq, 5000, f080, M68000);
    BASE(addsubq, 5080, f0c0);
    INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
    INSN(scc, 50c0, f0c0, M68000); /* Scc.B <EA> */
    INSN(dbcc, 50c8, f0f8, M68000);
    INSN(tpf, 51f8, fff8, CF_ISA_A);

    /* Branch instructions.  */
    BASE(branch, 6000, f000);
    /* Disable long branch instructions, then add back the ones we want.  */
    BASE(undef, 60ff, f0ff); /* All long branches.  */
    INSN(branch, 60ff, f0ff, CF_ISA_B);
    INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch, 60ff, ffff, BRAL);
    INSN(branch, 60ff, f0ff, BCCL);

    BASE(moveq, 7000, f100);
    INSN(mvzs, 7100, f100, CF_ISA_B);
    BASE(or, 8000, f000);
    BASE(divw, 80c0, f0c0);
    INSN(sbcd_reg, 8100, f1f8, M68000);
    INSN(sbcd_mem, 8108, f1f8, M68000);
    BASE(addsub, 9000, f000);
    INSN(undef, 90c0, f0c0, CF_ISA_A);
    INSN(subx_reg, 9180, f1f8, CF_ISA_A);
    INSN(subx_reg, 9100, f138, M68000);
    INSN(subx_mem, 9108, f138, M68000);
    INSN(suba, 91c0, f1c0, CF_ISA_A);
    INSN(suba, 90c0, f0c0, M68000);

    /* 0xa000 region: ColdFire EMAC instructions.  */
    BASE(undef_mac, a000, f000);
    INSN(mac, a000, f100, CF_EMAC);
    INSN(from_mac, a180, f9b0, CF_EMAC);
    INSN(move_mac, a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac, a100, f9c0, CF_EMAC);
    INSN(to_macsr, a900, ffc0, CF_EMAC);
    INSN(to_mext, ab00, fbc0, CF_EMAC);
    INSN(to_mask, ad00, ffc0, CF_EMAC);

    INSN(mov3q, a140, f1c0, CF_ISA_B);
    INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp, b080, f1c0, CF_ISA_A);
    INSN(cmpa, b1c0, f1c0, CF_ISA_A);
    INSN(cmp, b000, f100, M68000);
    INSN(eor, b100, f100, M68000);
    INSN(cmpm, b108, f138, M68000);
    INSN(cmpa, b0c0, f0c0, M68000);
    INSN(eor, b180, f1c0, CF_ISA_A);
    BASE(and, c000, f000);
    INSN(exg_dd, c140, f1f8, M68000);
    INSN(exg_aa, c148, f1f8, M68000);
    INSN(exg_da, c188, f1f8, M68000);
    BASE(mulw, c0c0, f0c0);
    INSN(abcd_reg, c100, f1f8, M68000);
    INSN(abcd_mem, c108, f1f8, M68000);
    BASE(addsub, d000, f000);
    INSN(undef, d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg, d180, f1f8, CF_ISA_A);
    INSN(addx_reg, d100, f138, M68000);
    INSN(addx_mem, d108, f138, M68000);
    INSN(adda, d1c0, f1c0, CF_ISA_A);
    INSN(adda, d0c0, f0c0, M68000);
    /* 0xe000 region: shifts, rotates and bitfield operations.  */
    INSN(shift_im, e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68000);
    INSN(shift16_im, e040, f0f0, M68000);
    INSN(shift_im, e080, f0f0, M68000);
    INSN(shift8_reg, e020, f0f0, M68000);
    INSN(shift16_reg, e060, f0f0, M68000);
    INSN(shift_reg, e0a0, f0f0, M68000);
    INSN(shift_mem, e0c0, fcc0, M68000);
    INSN(rotate_im, e090, f0f0, M68000);
    INSN(rotate8_im, e010, f0f0, M68000);
    INSN(rotate16_im, e050, f0f0, M68000);
    INSN(rotate_reg, e0b0, f0f0, M68000);
    INSN(rotate8_reg, e030, f0f0, M68000);
    INSN(rotate16_reg, e070, f0f0, M68000);
    INSN(rotate_mem, e4c0, fcc0, M68000);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD); /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD); /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD); /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD); /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD); /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD); /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD); /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD); /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD); /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD); /* bftst */
    /* 0xf000 region: coprocessor / FPU instructions.  */
    BASE(undef_fpu, f000, f000);
    INSN(fpu, f200, ffc0, CF_FPU);
    INSN(fbcc, f280, ffc0, CF_FPU);
    INSN(frestore, f340, ffc0, CF_FPU);
    INSN(fsave, f300, ffc0, CF_FPU);
    INSN(fpu, f200, ffc0, FPU);
    INSN(fscc, f240, ffc0, FPU);
    INSN(fbcc, f280, ff80, FPU);
    INSN(frestore, f340, ffc0, FPU);
    INSN(fsave, f300, ffc0, FPU);
    INSN(intouch, f340, ffc0, CF_ISA_A);
    INSN(cpushl, f428, ff38, CF_ISA_A);
    INSN(wddata, fb00, ff00, CF_ISA_A);
    INSN(wdebug, fbc0, ffc0, CF_ISA_A);
#undef INSN
}
5523
5524 /* ??? Some of this implementation is not exception safe. We should always
5525 write back the result to memory before setting the condition codes. */
5526 static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
5527 {
5528 uint16_t insn = read_im16(env, s);
5529 opcode_table[insn](env, s, insn);
5530 do_writebacks(s);
5531 }
5532
/* generate intermediate code for basic block 'tb'.
   Translates guest instructions starting at tb->pc into TCG ops until a
   control-flow change, buffer/limit exhaustion, or a page-boundary margin
   is reached, then emits the appropriate TB epilogue.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    CPUM68KState *env = cs->env_ptr;
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    int pc_offset;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    dc->env = env;
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    /* Condition codes start in an unknown state; translation tracks them
       lazily via cc_op.  */
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_synced = 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    /* User mode iff the supervisor bit is clear.  */
    dc->user = (env->sr & SR_S) == 0;
    dc->done_mac = 0;
    dc->writeback_mask = 0;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
        pc_offset = dc->pc - pc_start;
        gen_throws_exception = NULL;
        /* Record pc and cc_op for restore_state_to_opc().  */
        tcg_gen_insn_start(dc->pc, dc->cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_exception(dc, dc->pc, EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);
        /* Continue until control flow changes, the op buffer fills,
           single-stepping is requested, we near the end of the page
           (32-byte margin -- presumably room for the longest insn;
           TODO confirm), or the insn budget is spent.  */
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    if (tb_cflags(tb) & CF_LAST_IO)
        gen_io_end();
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (!dc->is_jmp) {
            update_cc_op(dc);
            tcg_gen_movi_i32(QREG_PC, dc->pc);
        }
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
    } else {
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            /* Fell off the end of the TB: chain to the next one.  */
            update_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            update_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
5639
5640 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
5641 {
5642 floatx80 a = { .high = high, .low = low };
5643 union {
5644 float64 f64;
5645 double d;
5646 } u;
5647
5648 u.f64 = floatx80_to_float64(a, &env->fp_status);
5649 return u.d;
5650 }
5651
/* Dump the architectural register state (D0-D7, A0-A7, FP0-FP7, PC, SR,
   FPSR and FPCR) to @f.  @flags is unused.  */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;

    /* One line per register number: data, address and FP register, the
       latter shown both as raw 80-bit parts and as a host double.  */
    for (i = 0; i < 8; i++) {
        cpu_fprintf(f, "D%d = %08x A%d = %08x "
                    "F%d = %04x %016"PRIx64" (%12g)\n",
                    i, env->dregs[i], i, env->aregs[i],
                    i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                    floatx80_to_double(env, env->fregs[i].l.upper,
                                       env->fregs[i].l.lower));
    }
    cpu_fprintf (f, "PC = %08x ", env->pc);
    /* Fold the live condition-code bits into the stored SR for display.  */
    sr = env->sr | cpu_m68k_get_ccr(env);
    cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & CCF_X) ? 'X' : '-',
                (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
                (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
    cpu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    cpu_fprintf(f, "\n "
                "FPCR = %04x ", env->fpcr);
    /* Decode the FPCR rounding-precision field.  */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        cpu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        cpu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        cpu_fprintf(f, "D ");
        break;
    }
    /* Decode the FPCR rounding-mode field.  */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        cpu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        cpu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        cpu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        cpu_fprintf(f, "RP ");
        break;
    }
}
5705
5706 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
5707 target_ulong *data)
5708 {
5709 int cc_op = data[1];
5710 env->pc = data[0];
5711 if (cc_op != CC_OP_DYNAMIC) {
5712 env->cc_op = cc_op;
5713 }
5714 }