]> git.proxmox.com Git - mirror_qemu.git/blob - target/m68k/translate.c
744eb3748b1f3b2ae6071e8126b0bd7df4047124
[mirror_qemu.git] / target / m68k / translate.c
1 /*
2 * m68k translation
3 *
4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/translator.h"
30
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33
34 #include "exec/log.h"
35 #include "fpu/softfloat.h"
36
37
38 //#define DEBUG_DISPATCH 1
39
/* Declare the QREG_* TCG globals listed in qregs.h.inc. */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

/* TCG views of the CPUState fields (created in m68k_tcg_init). */
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

/* Backing store for register names: "Dn"/"An" need 3 bytes each
   (16 registers), "ACCn" needs 5 bytes each (4 accumulators). */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_macc[4];

/* Extract a 3-bit register field at bit POS of INSN. */
#define REG(insn, pos) (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
/* Address registers go through get_areg() to honor pending writebacks. */
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP get_areg(s, 7)

/* Sentinel returned for invalid addressing modes; compared by identity. */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes. */
static TCGv store_dummy;

#include "exec/gen-icount.h"
66
/* Create all TCG globals used by the m68k translator (called once). */
void m68k_tcg_init(void)
{
    char *p;
    int i;

/* Instantiate the QREG_* globals declared above via qregs.h.inc. */
#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

    /*
     * cpu_env points at CPUM68KState embedded in M68kCPU; reach the
     * enclosing CPUState fields with a negative offset of env.
     */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /* Pack "Dn"/"An" (3 bytes each) then "ACCn" (5 bytes each) into
       cpu_reg_names; the increments match the array's size formula. */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /*
     * Sentinels at impossible (negative) env offsets, used only for
     * pointer-identity tests (IS_NULL_QREG / store_dummy returns) and
     * never actually loaded or stored.  NOTE(review): both carry the
     * name "NULL" in TCG dumps.
     */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
111
112 /* internal defines */
/* internal defines */

/* Per-translation decoder state, one per translation block. */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc;        /* address of the next code word to fetch */
    target_ulong pc_prev;   /* NOTE(review): presumably the previous insn's
                               pc — not referenced in this chunk, confirm */
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;       /* nonzero if env->cc_op matches cc_op */
    TCGv_i64 mactmp;        /* scratch for MAC ops — TODO confirm, unused here */
    int done_mac;
    int writeback_mask;     /* bitmask of aregs with a staged writeback */
    TCGv writeback[8];      /* staged values committed by do_writebacks() */
    bool ss_active;         /* NOTE(review): likely single-step flag — confirm */
} DisasContext;
126
127 static TCGv get_areg(DisasContext *s, unsigned regno)
128 {
129 if (s->writeback_mask & (1 << regno)) {
130 return s->writeback[regno];
131 } else {
132 return cpu_aregs[regno];
133 }
134 }
135
136 static void delay_set_areg(DisasContext *s, unsigned regno,
137 TCGv val, bool give_temp)
138 {
139 if (s->writeback_mask & (1 << regno)) {
140 if (give_temp) {
141 s->writeback[regno] = val;
142 } else {
143 tcg_gen_mov_i32(s->writeback[regno], val);
144 }
145 } else {
146 s->writeback_mask |= 1 << regno;
147 if (give_temp) {
148 s->writeback[regno] = val;
149 } else {
150 TCGv tmp = tcg_temp_new();
151 s->writeback[regno] = tmp;
152 tcg_gen_mov_i32(tmp, val);
153 }
154 }
155 }
156
157 static void do_writebacks(DisasContext *s)
158 {
159 unsigned mask = s->writeback_mask;
160 if (mask) {
161 s->writeback_mask = 0;
162 do {
163 unsigned regno = ctz32(mask);
164 tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
165 mask &= mask - 1;
166 } while (mask);
167 }
168 }
169
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */

#if defined(CONFIG_USER_ONLY)
/* User-mode emulation always runs unprivileged. */
#define IS_USER(s) 1
#else
/* Privilege and SFC/DFC function-code MMU indices come from TB flags. */
#define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#endif

/* Signature of every disas_* instruction handler. */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
/* Debug variant: log each dispatch, then forward to the real handler. */
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif
203
/*
 * Which flags each CC_OP form keeps live in QREG_CC_*.  Z and C are
 * omitted from the ADD/SUB/CMP forms because gen_flush_flags()
 * rederives them from N, V and X.
 */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
212
/* Switch the lazy flag state to OP, discarding now-dead CC values. */
static void set_cc_op(DisasContext *s, CCOp op)
{
    CCOp old_op = s->cc_op;
    int dead;

    if (old_op == op) {
        return;
    }
    s->cc_op = op;
    s->cc_op_synced = 0;    /* env->cc_op is now stale; see update_cc_op */

    /*
     * Discard CC computation that will no longer be used.
     * Note that X and N are never dead.
     */
    dead = cc_op_live[old_op] & ~cc_op_live[op];
    if (dead & CCF_C) {
        tcg_gen_discard_i32(QREG_CC_C);
    }
    if (dead & CCF_Z) {
        tcg_gen_discard_i32(QREG_CC_Z);
    }
    if (dead & CCF_V) {
        tcg_gen_discard_i32(QREG_CC_V);
    }
}
239
240 /* Update the CPU env CC_OP state. */
241 static void update_cc_op(DisasContext *s)
242 {
243 if (!s->cc_op_synced) {
244 s->cc_op_synced = 1;
245 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
246 }
247 }
248
249 /* Generate a jump to an immediate address. */
/* Generate a jump to an immediate address. */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    /* Flags must be synced before leaving the TB. */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
256
257 /* Generate a jump to the address in qreg DEST. */
/* Generate a jump to the address in qreg DEST. */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    /* Flags must be synced before leaving the TB. */
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
264
/* Emit a call to the raise_exception helper with exception number NR. */
static void gen_raise_exception(int nr)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(nr));
}
269
/* Raise exception NR, recording THIS_PC for a Format $2 stack frame. */
static void gen_raise_exception_format2(DisasContext *s, int nr,
                                        target_ulong this_pc)
{
    /*
     * Pass the address of the insn to the exception handler,
     * for recording in the Format $2 (6-word) stack frame.
     * Re-use mmu.ar for the purpose, since that's only valid
     * after tlb_fill.
     */
    tcg_gen_st_i32(tcg_constant_i32(this_pc), cpu_env,
                   offsetof(CPUM68KState, mmu.ar));
    gen_raise_exception(nr);
    s->base.is_jmp = DISAS_NORETURN;
}
284
/* Raise exception NR with the guest PC set to DEST first. */
static void gen_exception(DisasContext *s, uint32_t dest, int nr)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);

    gen_raise_exception(nr);

    s->base.is_jmp = DISAS_NORETURN;
}
294
/* Raise an address-error exception at the current insn. */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
}
299
/*
 * Generate a load from the specified address.  Narrow values are
 * sign extended to full register width.
 */
static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
                            int sign, int index)
{
    TCGv tmp = tcg_temp_new_i32();

    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* OS_BYTE/WORD/LONG double as the MemOp size field here. */
        tcg_gen_qemu_ld_tl(tmp, addr, index,
                           opsize | (sign ? MO_SIGN : 0) | MO_TE);
        break;
    default:
        g_assert_not_reached();
    }
    return tmp;
}
321
/* Generate a store. */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
                             int index)
{
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* OS_BYTE/WORD/LONG double as the MemOp size field here. */
        tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
        break;
    default:
        g_assert_not_reached();
    }
}
336
/* Direction/extension selector for gen_ldst() and the gen_ea family. */
typedef enum {
    EA_STORE,   /* write val to the effective address */
    EA_LOADU,   /* zero-extending load */
    EA_LOADS    /* sign-extending load */
} ea_what;
342
/*
 * Generate a zero/sign-extending load (EA_LOADU/EA_LOADS), or a store
 * for EA_STORE.  Stores return the store_dummy marker so that callers
 * can distinguish a completed store from a bad addressing mode.
 */
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
                     ea_what what, int index)
{
    if (what == EA_STORE) {
        gen_store(s, opsize, addr, val, index);
        return store_dummy;
    } else {
        return gen_load(s, opsize, addr, what == EA_LOADS, index);
    }
}
357
/* Read a 16-bit immediate constant */
static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
{
    uint16_t im;
    /* Fetch from the instruction stream and advance the decode pc. */
    im = translator_lduw(env, &s->base, s->pc);
    s->pc += 2;
    return im;
}
366
/* Read an 8-bit immediate constant */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
{
    /*
     * Byte immediates occupy the low half of a full 16-bit extension
     * word; the uint8_t return type discards the unused high byte.
     */
    return read_im16(env, s);
}
372
373 /* Read a 32-bit immediate constant. */
374 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
375 {
376 uint32_t im;
377 im = read_im16(env, s) << 16;
378 im |= 0xffff & read_im16(env, s);
379 return im;
380 }
381
382 /* Read a 64-bit immediate constant. */
383 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
384 {
385 uint64_t im;
386 im = (uint64_t)read_im32(env, s) << 32;
387 im |= (uint64_t)read_im32(env, s);
388 return im;
389 }
390
/*
 * Calculate an address index: Xn.{w|l} scaled by 1/2/4/8, decoded
 * from extension-word bits 15-9.  TMP is scratch storage; the return
 * value may alias TMP or the index register itself.
 */
static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    /* Bit 15 selects An vs Dn; bits 14-12 give the register number. */
    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        /* Word-sized index: sign-extend the low 16 bits. */
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
409
410 /*
411 * Handle a base + index + displacement effective address.
412 * A NULL_QREG base means pc-relative.
413 */
414 static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
415 {
416 uint32_t offset;
417 uint16_t ext;
418 TCGv add;
419 TCGv tmp;
420 uint32_t bd, od;
421
422 offset = s->pc;
423 ext = read_im16(env, s);
424
425 if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
426 return NULL_QREG;
427
428 if (m68k_feature(s->env, M68K_FEATURE_M68K) &&
429 !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
430 ext &= ~(3 << 9);
431 }
432
433 if (ext & 0x100) {
434 /* full extension word format */
435 if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
436 return NULL_QREG;
437
438 if ((ext & 0x30) > 0x10) {
439 /* base displacement */
440 if ((ext & 0x30) == 0x20) {
441 bd = (int16_t)read_im16(env, s);
442 } else {
443 bd = read_im32(env, s);
444 }
445 } else {
446 bd = 0;
447 }
448 tmp = tcg_temp_new();
449 if ((ext & 0x44) == 0) {
450 /* pre-index */
451 add = gen_addr_index(s, ext, tmp);
452 } else {
453 add = NULL_QREG;
454 }
455 if ((ext & 0x80) == 0) {
456 /* base not suppressed */
457 if (IS_NULL_QREG(base)) {
458 base = tcg_constant_i32(offset + bd);
459 bd = 0;
460 }
461 if (!IS_NULL_QREG(add)) {
462 tcg_gen_add_i32(tmp, add, base);
463 add = tmp;
464 } else {
465 add = base;
466 }
467 }
468 if (!IS_NULL_QREG(add)) {
469 if (bd != 0) {
470 tcg_gen_addi_i32(tmp, add, bd);
471 add = tmp;
472 }
473 } else {
474 add = tcg_constant_i32(bd);
475 }
476 if ((ext & 3) != 0) {
477 /* memory indirect */
478 base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
479 if ((ext & 0x44) == 4) {
480 add = gen_addr_index(s, ext, tmp);
481 tcg_gen_add_i32(tmp, add, base);
482 add = tmp;
483 } else {
484 add = base;
485 }
486 if ((ext & 3) > 1) {
487 /* outer displacement */
488 if ((ext & 3) == 2) {
489 od = (int16_t)read_im16(env, s);
490 } else {
491 od = read_im32(env, s);
492 }
493 } else {
494 od = 0;
495 }
496 if (od != 0) {
497 tcg_gen_addi_i32(tmp, add, od);
498 add = tmp;
499 }
500 }
501 } else {
502 /* brief extension word format */
503 tmp = tcg_temp_new();
504 add = gen_addr_index(s, ext, tmp);
505 if (!IS_NULL_QREG(base)) {
506 tcg_gen_add_i32(tmp, add, base);
507 if ((int8_t)ext)
508 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
509 } else {
510 tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
511 }
512 add = tmp;
513 }
514 return add;
515 }
516
/* Sign or zero extend a value. */

/* Extend VAL of size OPSIZE into RES (full 32 bits); OS_LONG copies. */
static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
{
    switch (opsize) {
    case OS_BYTE:
        if (sign) {
            tcg_gen_ext8s_i32(res, val);
        } else {
            tcg_gen_ext8u_i32(res, val);
        }
        break;
    case OS_WORD:
        if (sign) {
            tcg_gen_ext16s_i32(res, val);
        } else {
            tcg_gen_ext16u_i32(res, val);
        }
        break;
    case OS_LONG:
        tcg_gen_mov_i32(res, val);
        break;
    default:
        g_assert_not_reached();
    }
}
543
/* Evaluate all the CC flags. */

/*
 * Materialize all live flags into QREG_CC_* and switch to CC_OP_FLAGS.
 * Entry invariants (set by gen_update_cc_add / gen_update_cc_cmp):
 * ADD/SUB: N = result, V = source operand, X = carry/borrow out.
 * CMP:     N = destination operand, V = source operand.
 */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already in flag form; nothing to do. */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* t0 = result - src = original destination operand */
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        /* V = (res ^ src) & ~(src ^ dst): set iff the operands had the
           same sign and the result's sign differs. */
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* t0 = result + src = original destination operand */
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        /* V = (dst ^ res) & (dst ^ src) */
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* C = dst <u src; Z (and then N) = dst - src, size-extended. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* Logic ops leave the result in N; C and V are cleared. */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* cc_op only known at runtime: defer to the helper. */
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        gen_helper_flush_flags(cpu_env, tcg_constant_i32(s->cc_op));
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op. */
    s->cc_op = CC_OP_FLAGS;
}
618
619 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
620 {
621 TCGv tmp;
622
623 if (opsize == OS_LONG) {
624 tmp = val;
625 } else {
626 tmp = tcg_temp_new();
627 gen_ext(tmp, val, opsize, sign);
628 }
629
630 return tmp;
631 }
632
/* Set flags for a logic result: N holds the sign-extended value. */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
638
/* Record a compare for lazy flags: N = dest, V = src;
   CC_OP_CMPB/W/L is selected by adding opsize. */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}
645
/* Record an add/sub result for lazy flags: N = extended result,
   V = source operand.  The caller sets the matching CC_OP. */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
651
652 static inline int opsize_bytes(int opsize)
653 {
654 switch (opsize) {
655 case OS_BYTE: return 1;
656 case OS_WORD: return 2;
657 case OS_LONG: return 4;
658 case OS_SINGLE: return 4;
659 case OS_DOUBLE: return 8;
660 case OS_EXTENDED: return 12;
661 case OS_PACKED: return 12;
662 default:
663 g_assert_not_reached();
664 }
665 }
666
667 static inline int insn_opsize(int insn)
668 {
669 switch ((insn >> 6) & 3) {
670 case 0: return OS_BYTE;
671 case 1: return OS_WORD;
672 case 2: return OS_LONG;
673 default:
674 g_assert_not_reached();
675 }
676 }
677
678 static inline int ext_opsize(int ext, int pos)
679 {
680 switch ((ext >> pos) & 7) {
681 case 0: return OS_LONG;
682 case 1: return OS_SINGLE;
683 case 2: return OS_EXTENDED;
684 case 3: return OS_PACKED;
685 case 4: return OS_WORD;
686 case 5: return OS_DOUBLE;
687 case 6: return OS_BYTE;
688 default:
689 g_assert_not_reached();
690 }
691 }
692
693 /*
694 * Assign value to a register. If the width is less than the register width
695 * only the low part of the register is set.
696 */
697 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
698 {
699 TCGv tmp;
700 switch (opsize) {
701 case OS_BYTE:
702 tcg_gen_andi_i32(reg, reg, 0xffffff00);
703 tmp = tcg_temp_new();
704 tcg_gen_ext8u_i32(tmp, val);
705 tcg_gen_or_i32(reg, reg, tmp);
706 break;
707 case OS_WORD:
708 tcg_gen_andi_i32(reg, reg, 0xffff0000);
709 tmp = tcg_temp_new();
710 tcg_gen_ext16u_i32(tmp, val);
711 tcg_gen_or_i32(reg, reg, tmp);
712 break;
713 case OS_LONG:
714 case OS_SINGLE:
715 tcg_gen_mov_i32(reg, val);
716 break;
717 default:
718 g_assert_not_reached();
719 }
720 }
721
/*
 * Generate code for an "effective address".  Does not adjust the base
 * register for autoincrement addressing modes.  Returns NULL_QREG for
 * modes that have no memory address (register direct, immediate) or
 * that are invalid for OPSIZE.
 */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
    case 1: /* Address register direct. */
        /* Register direct modes have no memory address. */
        return NULL_QREG;
    case 3: /* Indirect postincrement. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrememnt. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        /* Byte operations on A7 (SP) step by 2 on 680x0 cores. */
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68K)) {
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement. */
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement. */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
            offset = (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 1: /* Absolute long. */
            offset = read_im32(env, s);
            return tcg_constant_i32(offset);
        case 2: /* pc displacement */
            /* Displacement is relative to the extension word address. */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 3: /* pc index+displacement. */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate. */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
789
790 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
791 int opsize)
792 {
793 int mode = extract32(insn, 3, 3);
794 int reg0 = REG(insn, 0);
795 return gen_lea_mode(env, s, mode, reg0, opsize);
796 }
797
/*
 * Generate code to load/store a value from/into an EA.  WHAT selects a
 * store (EA_STORE) or a zero/sign-extending load (EA_LOADU/EA_LOADS).
 * ADDRP is non-null for readwrite operands: the address computed on
 * load is returned through it and reused for the matching store.
 */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct. */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement. */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        /* Defer the increment to the read/write that completes the op. */
        if (what == EA_STORE || !addrp) {
            TCGv tmp = tcg_temp_new();
            /* Byte accesses through A7 (SP) step by 2 on 680x0 cores. */
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68K)) {
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrememnt. */
        if (addrp && what == EA_STORE) {
            /* Reuse the address computed by the earlier load. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            /* Sign extend values for consistency. */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_constant_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
914
915 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
916 int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
917 {
918 int mode = extract32(insn, 3, 3);
919 int reg0 = REG(insn, 0);
920 return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
921 }
922
923 static TCGv_ptr gen_fp_ptr(int freg)
924 {
925 TCGv_ptr fp = tcg_temp_new_ptr();
926 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
927 return fp;
928 }
929
930 static TCGv_ptr gen_fp_result_ptr(void)
931 {
932 TCGv_ptr fp = tcg_temp_new_ptr();
933 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
934 return fp;
935 }
936
/*
 * Copy one FPReg to another: the 16-bit upper word (presumably the
 * sign/exponent — see FPReg layout) plus the 64-bit lower mantissa.
 */
static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
{
    TCGv t32;
    TCGv_i64 t64;

    t32 = tcg_temp_new();
    tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
    tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));

    t64 = tcg_temp_new_i64();
    tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
    tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
}
950
951 static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
952 int index)
953 {
954 TCGv tmp;
955 TCGv_i64 t64;
956
957 t64 = tcg_temp_new_i64();
958 tmp = tcg_temp_new();
959 switch (opsize) {
960 case OS_BYTE:
961 case OS_WORD:
962 tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
963 gen_helper_exts32(cpu_env, fp, tmp);
964 break;
965 case OS_SINGLE:
966 tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
967 gen_helper_extf32(cpu_env, fp, tmp);
968 break;
969 case OS_DOUBLE:
970 tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
971 gen_helper_extf64(cpu_env, fp, t64);
972 break;
973 case OS_EXTENDED:
974 if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
975 gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
976 break;
977 }
978 tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
979 tcg_gen_shri_i32(tmp, tmp, 16);
980 tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
981 tcg_gen_addi_i32(tmp, addr, 4);
982 tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
983 tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
984 break;
985 case OS_PACKED:
986 /*
987 * unimplemented data type on 68040/ColdFire
988 * FIXME if needed for another FPU
989 */
990 gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
991 break;
992 default:
993 g_assert_not_reached();
994 }
995 }
996
/*
 * Store the FP register at FP to ADDR as an operand of size OPSIZE,
 * converting from the internal extended format via the red* helpers.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Convert to a 32-bit integer, then narrow on the store. */
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* 96-bit format: 16-bit exponent word, pad, 64-bit mantissa. */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
1043
1044 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1045 TCGv_ptr fp, ea_what what, int index)
1046 {
1047 if (what == EA_STORE) {
1048 gen_store_fp(s, opsize, addr, fp, index);
1049 } else {
1050 gen_load_fp(s, opsize, addr, fp, index);
1051 }
1052 }
1053
/*
 * Load/store an FP operand for the given addressing mode.
 * Returns 0 on success, -1 for an invalid mode/size combination.
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            /* Only integer and single-precision fit in a data register. */
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
        }
        return 0;
    case 1: /* Address register direct. */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement. */
        /* The areg is updated immediately here, not via delay_set_areg. */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrememnt. */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_constant_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tmp = tcg_constant_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_constant_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                /* 16-bit exponent word, then the 64-bit mantissa. */
                tmp = tcg_constant_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                t64 = tcg_constant_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                break;
            case OS_PACKED:
                /*
                 * unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1186
1187 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1188 int opsize, TCGv_ptr fp, ea_what what, int index)
1189 {
1190 int mode = extract32(insn, 3, 3);
1191 int reg0 = REG(insn, 0);
1192 return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1193 }
1194
/* A comparison usable with tcg_gen_*cond*: tcond applied to (v1, v2). */
typedef struct {
    TCGCond tcond;
    TCGv v1;
    TCGv v2;
} DisasCompare;
1200
/*
 * Fill *C with a TCG comparison equivalent to m68k condition code COND
 * (0..15), exploiting the lazily-tracked flag state in S->cc_op where
 * possible and falling back to fully materialized flags otherwise.
 * Even-numbered conditions are the inverse of the odd condition that
 * follows them; this is handled once at the end via tcg_invert_cond.
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly. */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* Recompute the sign of the result for PL/MI... */
            c->v2 = tcg_constant_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->v2 = tcg_constant_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /*
         * Logic operations clear V, which simplifies LE to (Z || N),
         * and since Z and N are co-located, this becomes a normal
         * comparison vs N.
         */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N. */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C. */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS. */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above. */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even conditions are the inverse of the following odd condition. */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1373
/* Emit a branch to L1, taken when m68k condition COND holds. */
static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
{
    DisasCompare c;

    gen_cc_cond(&c, s, cond);
    update_cc_op(s);
    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
}
1382
/*
 * Force a TB lookup after an instruction that changes the CPU state.
 * Stores the post-instruction PC and ends the TB with DISAS_EXIT so
 * the new state is observed before the next instruction executes.
 */
static void gen_exit_tb(DisasContext *s)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->base.is_jmp = DISAS_EXIT;
}
1390
/*
 * Evaluate the EA encoded in `insn` and load its value into RESULT,
 * sign- or zero-extended per OP_SIGN.  On an invalid EA, raise an
 * address fault and return from the enclosing translate function.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do { \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp, \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s)); \
        if (IS_NULL_QREG(result)) { \
            gen_addr_fault(s); \
            return; \
        } \
    } while (0)
1399
/*
 * Store VAL to the EA encoded in INSN.  On an invalid EA, raise an
 * address fault and return from the enclosing translate function.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do { \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, \
                                EA_STORE, IS_USER(s)); \
        if (IS_NULL_QREG(ea_result)) { \
            gen_addr_fault(s); \
            return; \
        } \
    } while (0)
1408
/*
 * Generate a jump to the immediate address DEST, using goto_tb chaining
 * when the target is within reach.  When single-step tracing is active,
 * raise a trace exception instead, with SRC as the faulting PC.
 */
static void gen_jmp_tb(DisasContext *s, int n, target_ulong dest,
                       target_ulong src)
{
    if (unlikely(s->ss_active)) {
        update_cc_op(s);
        tcg_gen_movi_i32(QREG_PC, dest);
        gen_raise_exception_format2(s, EXCP_TRACE, src);
    } else if (translator_use_goto_tb(&s->base, dest)) {
        /* Direct chained jump to the next TB. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Indirect jump: set PC and return to the main loop. */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1427
/* Scc <ea>: set a byte to all-ones if the condition holds, else zero. */
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);

    /* Map 0/1 to 0/-1, i.e. 0x00/0xff in the stored byte. */
    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
}
1443
/*
 * DBcc Dn,<disp>: if the condition does not hold, decrement the low
 * word of Dn and branch to base+disp unless the result is -1.
 */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;       /* displacement is relative to the extension word */
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    gen_partset_reg(OS_WORD, reg, tmp);     /* only the low word changes */
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
}
1467
/* Unimplemented MAC instruction: raise a line-A exception. */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1472
/* Unimplemented FPU instruction: raise a line-F exception. */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1477
/* Undefined instruction: log it and raise an illegal-instruction trap. */
DISAS_INSN(undef)
{
    /*
     * ??? This is both instructions that are as yet unimplemented
     * for the 680x0 series, as well as those that are implemented
     * but actually illegal for CPU32 or pre-68020.
     */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1489
1490 DISAS_INSN(mulw)
1491 {
1492 TCGv reg;
1493 TCGv tmp;
1494 TCGv src;
1495 int sign;
1496
1497 sign = (insn & 0x100) != 0;
1498 reg = DREG(insn, 9);
1499 tmp = tcg_temp_new();
1500 if (sign)
1501 tcg_gen_ext16s_i32(tmp, reg);
1502 else
1503 tcg_gen_ext16u_i32(tmp, reg);
1504 SRC_EA(env, src, OS_WORD, sign, NULL);
1505 tcg_gen_mul_i32(tmp, tmp, src);
1506 tcg_gen_mov_i32(reg, tmp);
1507 gen_logic_cc(s, tmp, OS_LONG);
1508 }
1509
/*
 * DIVS.W / DIVU.W <ea>,Dn: 32/16 division leaving a 16-bit remainder
 * in the high word and a 16-bit quotient in the low word of Dn.
 * The helper raises the divide-by-zero exception itself.
 */
DISAS_INSN(divw)
{
    int sign;
    TCGv src;
    TCGv destr;
    TCGv ilen;

    /* divX.w <EA>,Dn    32/16 -> 16r:16q */

    sign = (insn & 0x100) != 0;

    /* dest.l / src.w */

    SRC_EA(env, src, OS_WORD, sign, NULL);
    destr = tcg_constant_i32(REG(insn, 9));
    /* instruction length in bytes, consumed so far by this insn */
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsw(cpu_env, destr, src, ilen);
    } else {
        gen_helper_divuw(cpu_env, destr, src, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1534
/*
 * DIVS.L / DIVU.L and the 64/32 quad forms, selected by bits of the
 * extension word.  Bit 11 selects signed; bit 10 selects the 64-bit
 * dividend form (requires the QUAD_MULDIV feature).
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den, ilen;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_constant_i32(REG(ext, 12));
        reg = tcg_constant_i32(REG(ext, 0));
        /* instruction length in bytes, consumed so far by this insn */
        ilen = tcg_constant_i32(s->pc - s->base.pc_next);
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den, ilen);
        } else {
            gen_helper_divull(cpu_env, num, reg, den, ilen);
        }
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_constant_i32(REG(ext, 12));
    reg = tcg_constant_i32(REG(ext, 0));
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den, ilen);
    } else {
        gen_helper_divul(cpu_env, num, reg, den, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1581
/*
 * dest := BCD(dest) + BCD(src) + X, computed branchlessly on two
 * packed BCD digits per operand.  Bit 8 of the result carries out.
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*
     * dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /*
     * t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_temp_new();
    tcg_gen_addi_i32(t0, src, 0x066);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /*
     * t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /*
     * extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /*
     * generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);

    /*
     * remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
}
1646
/*
 * dest := BCD(dest) - BCD(src) - X, computed branchlessly by reduction
 * to a BCD addition with the ten's complement of src.
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*
     *  dest10 = dest10 - src10 - X
     *         = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /*
     * t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
}
1698
/*
 * Update flags after a BCD operation on VAL: accumulate !Z sticky-style
 * from the low byte, and set C and X from the carry in bit 8.
 */
static void bcd_flags(TCGv val)
{
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1708
/* ABCD Dy,Dx: BCD add with extend, register form. */
DISAS_INSN(abcd_reg)
{
    TCGv src;
    TCGv dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
    bcd_add(dest, src);
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1723
/* ABCD -(Ay),-(Ax): BCD add with extend, memory (predecrement) form. */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    /* Write the sum back to the destination address computed above. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1744
/* SBCD Dy,Dx: BCD subtract with extend, register form. */
DISAS_INSN(sbcd_reg)
{
    TCGv src, dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);

    bcd_sub(dest, src);

    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1760
/* SBCD -(Ay),-(Ax): BCD subtract with extend, memory (predecrement) form. */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    /* Write the difference back to the destination address computed above. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1781
/* NBCD <ea>: BCD negate with extend (0 - operand - X). */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    dest = tcg_temp_new();
    tcg_gen_movi_i32(dest, 0);
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);
}
1799
/*
 * ADD/SUB between a data register and an <ea>.  Bit 14 of the opcode
 * selects add vs sub; bit 8 selects the direction (Dn op <ea> -> <ea>
 * when set, <ea> op Dn -> Dn when clear).
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* <ea> is the destination: Dn op <ea> -> <ea> */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        /* Dn is the destination: <ea> op Dn -> Dn */
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* carry out of the addition becomes X */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* borrow of the subtraction becomes X */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
}
1837
/* BITREV Dn (ColdFire): reverse the order of the bits in REG. */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}
1845
1846 DISAS_INSN(bitop_reg)
1847 {
1848 int opsize;
1849 int op;
1850 TCGv src1;
1851 TCGv src2;
1852 TCGv tmp;
1853 TCGv addr;
1854 TCGv dest;
1855
1856 if ((insn & 0x38) != 0)
1857 opsize = OS_BYTE;
1858 else
1859 opsize = OS_LONG;
1860 op = (insn >> 6) & 3;
1861 SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1862
1863 gen_flush_flags(s);
1864 src2 = tcg_temp_new();
1865 if (opsize == OS_BYTE)
1866 tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1867 else
1868 tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1869
1870 tmp = tcg_temp_new();
1871 tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2);
1872
1873 tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1874
1875 dest = tcg_temp_new();
1876 switch (op) {
1877 case 1: /* bchg */
1878 tcg_gen_xor_i32(dest, src1, tmp);
1879 break;
1880 case 2: /* bclr */
1881 tcg_gen_andc_i32(dest, src1, tmp);
1882 break;
1883 case 3: /* bset */
1884 tcg_gen_or_i32(dest, src1, tmp);
1885 break;
1886 default: /* btst */
1887 break;
1888 }
1889 if (op) {
1890 DEST_EA(env, insn, opsize, dest, &addr);
1891 }
1892 }
1893
/* SATS Dn (ColdFire): saturate the register based on the V flag. */
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
1902
/* Push VAL onto the stack: predecrement SP by 4 and store a long. */
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, val, IS_USER(s));
    /* SP is only updated after the store succeeds. */
    tcg_gen_mov_i32(QREG_SP, tmp);
}
1912
1913 static TCGv mreg(int reg)
1914 {
1915 if (reg < 8) {
1916 /* Dx */
1917 return cpu_dregs[reg];
1918 }
1919 /* Ax */
1920 return cpu_aregs[reg & 7];
1921 }
1922
/*
 * MOVEM.[wl]: move multiple registers to or from memory, selected by
 * the 16-bit mask in the extension word.  Loaded values are read into
 * temporaries first and only committed to the register file once all
 * memory accesses are done.
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /*
         * We want a bare copy of the address reg, without any pre-decrement
         * adjustment, as gen_lea would provide.
         */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_constant_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        /* Commit only after every load has completed. */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /*
                         * M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }
}
2028
/*
 * MOVEP Dx,(d16,Ay) / (d16,Ay),Dx: transfer 2 or 4 bytes between a
 * data register and alternate memory bytes (every other byte starting
 * at An + displacement), most significant byte first.
 */
DISAS_INSN(movep)
{
    uint8_t i;
    int16_t displ;
    TCGv reg;
    TCGv addr;
    TCGv abuf;
    TCGv dbuf;

    displ = read_im16(env, s);

    addr = AREG(insn, 0);
    reg = DREG(insn, 9);

    abuf = tcg_temp_new();
    tcg_gen_addi_i32(abuf, addr, displ);
    dbuf = tcg_temp_new();

    /* Bit 6 selects long (4 bytes) vs word (2 bytes). */
    if (insn & 0x40) {
        i = 4;
    } else {
        i = 2;
    }

    if (insn & 0x80) {
        /* register to memory */
        for ( ; i > 0 ; i--) {
            tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
            tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    } else {
        /* memory to register */
        for ( ; i > 0 ; i--) {
            tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
            tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    }
}
2071
2072 DISAS_INSN(bitop_im)
2073 {
2074 int opsize;
2075 int op;
2076 TCGv src1;
2077 uint32_t mask;
2078 int bitnum;
2079 TCGv tmp;
2080 TCGv addr;
2081
2082 if ((insn & 0x38) != 0)
2083 opsize = OS_BYTE;
2084 else
2085 opsize = OS_LONG;
2086 op = (insn >> 6) & 3;
2087
2088 bitnum = read_im16(env, s);
2089 if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
2090 if (bitnum & 0xfe00) {
2091 disas_undef(env, s, insn);
2092 return;
2093 }
2094 } else {
2095 if (bitnum & 0xff00) {
2096 disas_undef(env, s, insn);
2097 return;
2098 }
2099 }
2100
2101 SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2102
2103 gen_flush_flags(s);
2104 if (opsize == OS_BYTE)
2105 bitnum &= 7;
2106 else
2107 bitnum &= 31;
2108 mask = 1 << bitnum;
2109
2110 tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2111
2112 if (op) {
2113 tmp = tcg_temp_new();
2114 switch (op) {
2115 case 1: /* bchg */
2116 tcg_gen_xori_i32(tmp, src1, mask);
2117 break;
2118 case 2: /* bclr */
2119 tcg_gen_andi_i32(tmp, src1, ~mask);
2120 break;
2121 case 3: /* bset */
2122 tcg_gen_ori_i32(tmp, src1, mask);
2123 break;
2124 default: /* btst */
2125 break;
2126 }
2127 DEST_EA(env, insn, opsize, tmp, &addr);
2128 }
2129 }
2130
/* Materialize the CCR value into a new temporary via the helper. */
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    update_cc_op(s);
    dest = tcg_temp_new();
    gen_helper_get_ccr(dest, cpu_env);
    return dest;
}
2140
/* Materialize the full SR: upper system bits from QREG_SR plus the CCR. */
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);
    return sr;
}
2152
/*
 * Set SR (or only CCR when CCR_ONLY) from the immediate VAL.
 * The CCR-only path decomposes VAL into the individual flag registers.
 */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        /* Must writeback before changing security state. */
        do_writebacks(s);
        gen_helper_set_sr(cpu_env, tcg_constant_i32(val));
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2168
/* Set SR (or only CCR when CCR_ONLY) from the runtime value VAL. */
static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
{
    if (ccr_only) {
        gen_helper_set_ccr(cpu_env, val);
    } else {
        /* Must writeback before changing security state. */
        do_writebacks(s);
        gen_helper_set_sr(cpu_env, val);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2180
/*
 * MOVE <ea>,SR / MOVE <ea>,CCR: common decode, handling the immediate
 * source form (EA field 0x3c) specially.
 */
static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                           bool ccr_only)
{
    if ((insn & 0x3f) == 0x3c) {
        uint16_t val;
        val = read_im16(env, s);
        gen_set_sr_im(s, val, ccr_only);
    } else {
        TCGv src;
        SRC_EA(env, src, OS_WORD, 0, NULL);
        gen_set_sr(s, src, ccr_only);
    }
}
2194
/*
 * Immediate arithmetic/logic group: ORI/ANDI/SUBI/ADDI/EORI/CMPI
 * #imm,<ea>.  The #imm,CCR (byte) and #imm,SR (word) destination forms
 * are valid only for ori/andi/eori, with SR requiring supervisor mode.
 */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;
    bool with_SR = ((insn & 0x3f) == 0x3c);

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    switch (opsize) {
    case OS_BYTE:
        im = tcg_constant_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_constant_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_constant_i32(read_im32(env, s));
        break;
    default:
        g_assert_not_reached();
    }

    if (with_SR) {
        /* SR/CCR can only be used with andi/eori/ori */
        if (op == 2 || op == 3 || op == 6) {
            disas_undef(env, s, insn);
            return;
        }
        switch (opsize) {
        case OS_BYTE:
            src1 = gen_get_ccr(s);
            break;
        case OS_WORD:
            if (IS_USER(s)) {
                gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
                return;
            }
            src1 = gen_get_sr(s);
            break;
        default:
            /* OS_LONG; others already g_assert_not_reached.  */
            disas_undef(env, s, insn);
            return;
        }
    } else {
        /* cmpi does not write back, so it needs no address. */
        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    }
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 2: /* subi */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
}
2299
/*
 * CAS Dc,Du,<ea>: compare-and-swap, implemented with a TCG atomic
 * cmpxchg.  Flags are set as for CMP of the loaded value against Dc.
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    MemOp opc;

    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /*
     * if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    /* Commit the address-register side effect of (An)+ / -(An) modes. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement. */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement. */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2362
/*
 * CAS2.W Dc1:Dc2,Du1:Du2,(Rn1):(Rn2): dual word compare-and-swap.
 * No parallel word implementation exists, so under CF_PARALLEL the
 * translator bails out to exclusive execution.
 */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(cpu_env);
    } else {
        TCGv regs = tcg_constant_i32(REG(ext2, 6) |
                                     (REG(ext1, 6) << 3) |
                                     (REG(ext2, 0) << 6) |
                                     (REG(ext1, 0) << 9));
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2412
/*
 * CAS2.L Dc1:Dc2,Du1:Du2,(Rn1):(Rn2): dual long compare-and-swap, with
 * a dedicated parallel helper when translating under CF_PARALLEL.
 */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    regs = tcg_constant_i32(REG(ext2, 6) |
                            (REG(ext1, 6) << 3) |
                            (REG(ext2, 0) << 6) |
                            (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2462
2463 DISAS_INSN(byterev)
2464 {
2465 TCGv reg;
2466
2467 reg = DREG(insn, 0);
2468 tcg_gen_bswap32_i32(reg, reg);
2469 }
2470
/*
 * MOVE.b/.w/.l <ea>,<ea> and MOVEA <ea>,An.  MOVEA (destination mode 1)
 * does not affect the condition codes; the normal form sets N/Z.
 */
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        /* Re-pack the destination mode/register fields into EA form. */
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src, opsize);
    }
}
2507
/* NEGX <ea>: negate with extend, i.e. dest = 0 - dest - X. */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = -(src + X);
     */

    z = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /*
     * Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2550
2551 DISAS_INSN(lea)
2552 {
2553 TCGv reg;
2554 TCGv tmp;
2555
2556 reg = AREG(insn, 9);
2557 tmp = gen_lea(env, s, insn, OS_LONG);
2558 if (IS_NULL_QREG(tmp)) {
2559 gen_addr_fault(s);
2560 return;
2561 }
2562 tcg_gen_mov_i32(reg, tmp);
2563 }
2564
2565 DISAS_INSN(clr)
2566 {
2567 int opsize;
2568 TCGv zero;
2569
2570 zero = tcg_constant_i32(0);
2571 opsize = insn_opsize(insn);
2572 DEST_EA(env, insn, opsize, zero, NULL);
2573 gen_logic_cc(s, zero, opsize);
2574 }
2575
DISAS_INSN(move_from_ccr)
{
    TCGv ccr;

    /* Materialize the condition-code register and store it as a word. */
    ccr = gen_get_ccr(s);
    DEST_EA(env, insn, OS_WORD, ccr, NULL);
}
2583
/* neg: two's-complement negate of the EA operand, updating flags. */
DISAS_INSN(neg)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_neg_i32(dest, src1);
    set_cc_op(s, CC_OP_SUBB + opsize);
    gen_update_cc_add(dest, src1, opsize);
    /* X is set whenever the result is non-zero. */
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
    DEST_EA(env, insn, opsize, dest, &addr);
}
2600
DISAS_INSN(move_to_ccr)
{
    /* Shares the SR-move path; the final 'true' presumably selects a
       CCR-only update — confirm against gen_move_to_sr. */
    gen_move_to_sr(env, s, insn, true);
}
2605
2606 DISAS_INSN(not)
2607 {
2608 TCGv src1;
2609 TCGv dest;
2610 TCGv addr;
2611 int opsize;
2612
2613 opsize = insn_opsize(insn);
2614 SRC_EA(env, src1, opsize, 1, &addr);
2615 dest = tcg_temp_new();
2616 tcg_gen_not_i32(dest, src1);
2617 DEST_EA(env, insn, opsize, dest, &addr);
2618 gen_logic_cc(s, dest, opsize);
2619 }
2620
2621 DISAS_INSN(swap)
2622 {
2623 TCGv src1;
2624 TCGv src2;
2625 TCGv reg;
2626
2627 src1 = tcg_temp_new();
2628 src2 = tcg_temp_new();
2629 reg = DREG(insn, 0);
2630 tcg_gen_shli_i32(src1, reg, 16);
2631 tcg_gen_shri_i32(src2, reg, 16);
2632 tcg_gen_or_i32(reg, src1, src2);
2633 gen_logic_cc(s, reg, OS_LONG);
2634 }
2635
DISAS_INSN(bkpt)
{
#if defined(CONFIG_SOFTMMU)
    /* System emulation: bkpt is treated as an illegal instruction. */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
#else
    /* User emulation: report a debug trap instead. */
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
#endif
}
2644
2645 DISAS_INSN(pea)
2646 {
2647 TCGv tmp;
2648
2649 tmp = gen_lea(env, s, insn, OS_LONG);
2650 if (IS_NULL_QREG(tmp)) {
2651 gen_addr_fault(s);
2652 return;
2653 }
2654 gen_push(s, tmp);
2655 }
2656
2657 DISAS_INSN(ext)
2658 {
2659 int op;
2660 TCGv reg;
2661 TCGv tmp;
2662
2663 reg = DREG(insn, 0);
2664 op = (insn >> 6) & 7;
2665 tmp = tcg_temp_new();
2666 if (op == 3)
2667 tcg_gen_ext16s_i32(tmp, reg);
2668 else
2669 tcg_gen_ext8s_i32(tmp, reg);
2670 if (op == 2)
2671 gen_partset_reg(OS_WORD, reg, tmp);
2672 else
2673 tcg_gen_mov_i32(reg, tmp);
2674 gen_logic_cc(s, tmp, OS_LONG);
2675 }
2676
2677 DISAS_INSN(tst)
2678 {
2679 int opsize;
2680 TCGv tmp;
2681
2682 opsize = insn_opsize(insn);
2683 SRC_EA(env, tmp, opsize, 1, NULL);
2684 gen_logic_cc(s, tmp, opsize);
2685 }
2686
DISAS_INSN(pulse)
{
    /* Implemented as a NOP. */
}

DISAS_INSN(illegal)
{
    /* Raise an illegal-instruction exception at the current insn. */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
2696
/*
 * tas: test-and-set.  Sets the flags from the byte operand, then sets
 * its most significant bit.  The memory form uses an atomic fetch-or
 * so the read-modify-write is indivisible.
 */
DISAS_INSN(tas)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);

    if (mode == 0) {
        /* data register direct */
        TCGv dest = cpu_dregs[reg0];
        gen_logic_cc(s, dest, OS_BYTE);
        tcg_gen_ori_tl(dest, dest, 0x80);
    } else {
        TCGv src1, addr;

        addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE);
        if (IS_NULL_QREG(addr)) {
            gen_addr_fault(s);
            return;
        }
        src1 = tcg_temp_new();
        tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80),
                                   IS_USER(s), MO_SB);
        gen_logic_cc(s, src1, OS_BYTE);

        /* Apply the address-register side effects ourselves. */
        switch (mode) {
        case 3: /* Indirect postincrement.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 1);
            break;
        case 4: /* Indirect predecrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
            break;
        }
    }
}
2730
/*
 * mull: long multiply.  An extension word selects signed vs unsigned
 * (bit 11) and, when bit 10 is set, a 64-bit result split across two
 * data registers (Dh in bits 2:0, Dl in bits 14:12) — that form
 * requires the QUAD_MULDIV feature.
 */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        /* Low half of the product goes to CC_Z, high half to CC_N. */
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z reflects the whole 64-bit product (!Z convention). */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /*
         * The upper 32 bits of the product are discarded, so
         * muls.l and mulu.l are functionally equivalent.
         */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2793
/*
 * Common body for link/linkl: push An, optionally make An the new
 * frame pointer, and adjust SP by the signed displacement.
 */
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
    /* link a7: do not overwrite SP with the frame-pointer value. */
    if ((insn & 7) != 7) {
        tcg_gen_mov_i32(reg, tmp);
    }
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
}
2808
DISAS_INSN(link)
{
    int16_t offset;

    /* link.w: a 16-bit displacement follows the opcode. */
    offset = read_im16(env, s);
    gen_link(s, insn, offset);
}

DISAS_INSN(linkl)
{
    int32_t offset;

    /* link.l: a 32-bit displacement follows the opcode. */
    offset = read_im32(env, s);
    gen_link(s, insn, offset);
}
2824
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    /*
     * unlk: An is reloaded from the stack and SP becomes An + 4.
     * Copy An first so the sequence stays correct when An is A7.
     */
    src = tcg_temp_new();
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
}
2838
#if defined(CONFIG_SOFTMMU)
DISAS_INSN(reset)
{
    /* RESET is privileged; in user mode raise a privilege violation. */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(cpu_env);
}
#endif

DISAS_INSN(nop)
{
    /* No operation. */
}
2854
DISAS_INSN(rtd)
{
    TCGv tmp;
    /* Extra displacement added to SP after popping the return PC. */
    int16_t offset = read_im16(env, s);

    tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
    gen_jmp(s, tmp);
}
2864
DISAS_INSN(rtr)
{
    TCGv tmp;
    TCGv ccr;
    TCGv sp;

    /* rtr: pop a word into CCR, then pop the return address. */
    sp = tcg_temp_new();
    ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(sp, QREG_SP, 2);
    tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, sp, 4);

    /* The final 'true' presumably restricts the write to the CCR
       bits of SR — confirm against gen_set_sr. */
    gen_set_sr(s, ccr, true);

    gen_jmp(s, tmp);
}
2881
2882 DISAS_INSN(rts)
2883 {
2884 TCGv tmp;
2885
2886 tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2887 tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2888 gen_jmp(s, tmp);
2889 }
2890
DISAS_INSN(jump)
{
    TCGv tmp;

    /*
     * Load the target address first to ensure correct exception
     * behavior.
     */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    /* Bit 6 clear selects jsr: push the return address first. */
    if ((insn & 0x40) == 0) {
        /* jsr */
        gen_push(s, tcg_constant_i32(s->pc));
    }
    gen_jmp(s, tmp);
}
2910
/*
 * addq/subq: add or subtract a quick immediate (1..8, a 0 field
 * encodes 8).  Bit 8 selects subtract.  Address-register destinations
 * are always long-sized and leave the condition codes alone.
 */
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long.  */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;
    }
    val = tcg_constant_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    if ((insn & 0x38) == 0x08) {
        /*
         * Don't update condition codes if the destination is an
         * address register.
         */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        /* X is the unsigned borrow/carry of the operation. */
        if (insn & 0x0100) {
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    DEST_EA(env, insn, opsize, dest, &addr);
}
2958
2959 DISAS_INSN(branch)
2960 {
2961 int32_t offset;
2962 uint32_t base;
2963 int op;
2964
2965 base = s->pc;
2966 op = (insn >> 8) & 0xf;
2967 offset = (int8_t)insn;
2968 if (offset == 0) {
2969 offset = (int16_t)read_im16(env, s);
2970 } else if (offset == -1) {
2971 offset = read_im32(env, s);
2972 }
2973 if (op == 1) {
2974 /* bsr */
2975 gen_push(s, tcg_constant_i32(s->pc));
2976 }
2977 if (op > 1) {
2978 /* Bcc */
2979 TCGLabel *l1 = gen_new_label();
2980 gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
2981 gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
2982 gen_set_label(l1);
2983 gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
2984 } else {
2985 /* Unconditional branch. */
2986 update_cc_op(s);
2987 gen_jmp_tb(s, 0, base + offset, s->base.pc_next);
2988 }
2989 }
2990
2991 DISAS_INSN(moveq)
2992 {
2993 tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2994 gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2995 }
2996
2997 DISAS_INSN(mvzs)
2998 {
2999 int opsize;
3000 TCGv src;
3001 TCGv reg;
3002
3003 if (insn & 0x40)
3004 opsize = OS_WORD;
3005 else
3006 opsize = OS_BYTE;
3007 SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3008 reg = DREG(insn, 9);
3009 tcg_gen_mov_i32(reg, src);
3010 gen_logic_cc(s, src, opsize);
3011 }
3012
/*
 * or: bitwise OR between Dn and an EA operand.  Bit 8 set means the
 * result goes to <ea>, clear means it goes into Dn (partial write,
 * preserving the register's upper bits for sub-long sizes).
 */
DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 0);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    gen_logic_cc(s, dest, opsize);
}
3035
3036 DISAS_INSN(suba)
3037 {
3038 TCGv src;
3039 TCGv reg;
3040
3041 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3042 reg = AREG(insn, 9);
3043 tcg_gen_sub_i32(reg, reg, src);
3044 }
3045
/*
 * Common flag/result generation for subx: (X, N) = dest - (src + X).
 * Z is only ever cleared (!Z is sticky) and the result is left in
 * QREG_CC_N for the caller to write back.
 */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = dest - (src + X);
     */

    zero = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for subtract.  */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3078
3079 DISAS_INSN(subx_reg)
3080 {
3081 TCGv dest;
3082 TCGv src;
3083 int opsize;
3084
3085 opsize = insn_opsize(insn);
3086
3087 src = gen_extend(s, DREG(insn, 0), opsize, 1);
3088 dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3089
3090 gen_subx(s, src, dest, opsize);
3091
3092 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3093 }
3094
/* subx -(Ay),-(Ax): memory form with predecrement on both operands. */
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement the source address register, then load. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Predecrement the destination address register, then load. */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_subx(s, src, dest, opsize);

    /* gen_subx left the result in QREG_CC_N. */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3117
3118 DISAS_INSN(mov3q)
3119 {
3120 TCGv src;
3121 int val;
3122
3123 val = (insn >> 9) & 7;
3124 if (val == 0) {
3125 val = -1;
3126 }
3127 src = tcg_constant_i32(val);
3128 gen_logic_cc(s, src, OS_LONG);
3129 DEST_EA(env, insn, OS_LONG, src, NULL);
3130 }
3131
3132 DISAS_INSN(cmp)
3133 {
3134 TCGv src;
3135 TCGv reg;
3136 int opsize;
3137
3138 opsize = insn_opsize(insn);
3139 SRC_EA(env, src, opsize, 1, NULL);
3140 reg = gen_extend(s, DREG(insn, 9), opsize, 1);
3141 gen_update_cc_cmp(s, reg, src, opsize);
3142 }
3143
3144 DISAS_INSN(cmpa)
3145 {
3146 int opsize;
3147 TCGv src;
3148 TCGv reg;
3149
3150 if (insn & 0x100) {
3151 opsize = OS_LONG;
3152 } else {
3153 opsize = OS_WORD;
3154 }
3155 SRC_EA(env, src, opsize, 1, NULL);
3156 reg = AREG(insn, 9);
3157 gen_update_cc_cmp(s, reg, src, OS_LONG);
3158 }
3159
/* cmpm (Ay)+,(Ax)+: compare two memory operands, both post-increment. */
DISAS_INSN(cmpm)
{
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay.  */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax.  */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}
3174
3175 DISAS_INSN(eor)
3176 {
3177 TCGv src;
3178 TCGv dest;
3179 TCGv addr;
3180 int opsize;
3181
3182 opsize = insn_opsize(insn);
3183
3184 SRC_EA(env, src, opsize, 0, &addr);
3185 dest = tcg_temp_new();
3186 tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3187 gen_logic_cc(s, dest, opsize);
3188 DEST_EA(env, insn, opsize, dest, &addr);
3189 }
3190
3191 static void do_exg(TCGv reg1, TCGv reg2)
3192 {
3193 TCGv temp = tcg_temp_new();
3194 tcg_gen_mov_i32(temp, reg1);
3195 tcg_gen_mov_i32(reg1, reg2);
3196 tcg_gen_mov_i32(reg2, temp);
3197 }
3198
DISAS_INSN(exg_dd)
{
    /* exchange Dx and Dy */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}

DISAS_INSN(exg_aa)
{
    /* exchange Ax and Ay */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}

DISAS_INSN(exg_da)
{
    /* exchange Dx and Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
3216
/*
 * and: bitwise AND between Dn and an EA operand.  Bit 8 set means the
 * result goes to <ea>, clear means it goes into Dn (partial write,
 * preserving the register's upper bits for sub-long sizes).
 */
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
}
3240
3241 DISAS_INSN(adda)
3242 {
3243 TCGv src;
3244 TCGv reg;
3245
3246 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3247 reg = AREG(insn, 9);
3248 tcg_gen_add_i32(reg, reg, src);
3249 }
3250
/*
 * Common flag/result generation for addx: (X, N) = src + dest + X.
 * Z is only ever cleared (!Z is sticky) and the result is left in
 * QREG_CC_N for the caller to write back.
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    zero = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition.  */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3282
3283 DISAS_INSN(addx_reg)
3284 {
3285 TCGv dest;
3286 TCGv src;
3287 int opsize;
3288
3289 opsize = insn_opsize(insn);
3290
3291 dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3292 src = gen_extend(s, DREG(insn, 0), opsize, 1);
3293
3294 gen_addx(s, src, dest, opsize);
3295
3296 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3297 }
3298
/* addx -(Ay),-(Ax): memory form with predecrement on both operands. */
DISAS_INSN(addx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement the source address register, then load. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Predecrement the destination address register, then load. */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    /* gen_addx left the result in QREG_CC_N. */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3321
/*
 * Immediate-count register shifts: arithmetic (asl/asr) or logical
 * (lsl/lsr, bit 3 set), count 1..8 (a 0 field encodes 8), left when
 * bit 8 is set.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C is the last bit shifted out. */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /*
         * Note that ColdFire always clears V (done above),
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation.
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
            }
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: C is the last bit shifted out (bit count-1). */
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3373
/*
 * Register-count shifts.  The count is taken from Dn modulo 64; the
 * operation is widened to 64 bits so the last bit shifted out (for C)
 * is recoverable for any count.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /*
     * Note that m68k truncates the shift count modulo 64, not 32.
     * In addition, a 64-bit shift makes it easy to find "the last
     * bit shifted out", for the carry flag.
     */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free.  */
        } else {
            /* Sub-long: the carry bit lands at position 'bits'. */
            TCGv zero = tcg_constant_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /*
         * M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            TCGv_i64 tt = tcg_constant_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: place the value in the high half so the last
           bit shifted out ends up in the low half's MSB. */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    /* Write back the result.  */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3465
DISAS_INSN(shift8_im)
{
    /* Immediate-count byte shift. */
    shift_im(s, insn, OS_BYTE);
}

DISAS_INSN(shift16_im)
{
    /* Immediate-count word shift. */
    shift_im(s, insn, OS_WORD);
}

DISAS_INSN(shift_im)
{
    /* Immediate-count long shift. */
    shift_im(s, insn, OS_LONG);
}

DISAS_INSN(shift8_reg)
{
    /* Register-count byte shift. */
    shift_reg(s, insn, OS_BYTE);
}

DISAS_INSN(shift16_reg)
{
    /* Register-count word shift. */
    shift_reg(s, insn, OS_WORD);
}

DISAS_INSN(shift_reg)
{
    /* Register-count long shift. */
    shift_reg(s, insn, OS_LONG);
}
3495
/* Memory shifts: always word-sized, with an implicit count of one. */
DISAS_INSN(shift_mem)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C is the bit shifted out of the top of the word. */
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /*
         * Note that ColdFire always clears V,
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            src = gen_extend(s, src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* C is the bit shifted out of the bottom. */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3535
/*
 * Rotate reg by shift (rol/ror, i.e. without X).  Sub-long sizes are
 * replicated across the 32-bit word so a 32-bit rotate produces the
 * right result, then sign-extended back.  Sets N/Z from the result
 * and C from the bit rotated around; V is cleared; X is untouched.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    /* C is the bit that was rotated into the other end. */
    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
3583
3584 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3585 {
3586 switch (size) {
3587 case 8:
3588 tcg_gen_ext8s_i32(reg, reg);
3589 break;
3590 case 16:
3591 tcg_gen_ext16s_i32(reg, reg);
3592 break;
3593 default:
3594 break;
3595 }
3596 tcg_gen_mov_i32(QREG_CC_N, reg);
3597 tcg_gen_mov_i32(QREG_CC_Z, reg);
3598 tcg_gen_mov_i32(QREG_CC_X, X);
3599 tcg_gen_mov_i32(QREG_CC_C, X);
3600 tcg_gen_movi_i32(QREG_CC_V, 0);
3601 }
3602
/* Result of rotate_x() is valid if 0 <= shift <= size */
/*
 * Rotate reg through the X flag (roxl/roxr) for 8/16-bit sizes,
 * returning the new X bit.  The rotate is built from two shifts plus
 * an insertion of the old X at the pivot position.
 */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_constant_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift);  /* shx = size - shift */
    }

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_extract_i32(X, reg, size, 1);

    return X;
}
3643
/* Result of rotate32_x() is valid if 0 <= shift < 33 */
/*
 * 32-bit rotate through the X flag, done as a 64-bit rotate of a
 * [reg:X] (or [X:reg]) composite.  Returns the new X bit.  When the
 * shift count is zero, both the register and X are left untouched.
 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    tcg_gen_or_i32(lo, lo, hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_constant_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);

    return X;
}
3704
3705 DISAS_INSN(rotate_im)
3706 {
3707 TCGv shift;
3708 int tmp;
3709 int left = (insn & 0x100);
3710
3711 tmp = (insn >> 9) & 7;
3712 if (tmp == 0) {
3713 tmp = 8;
3714 }
3715
3716 shift = tcg_constant_i32(tmp);
3717 if (insn & 8) {
3718 rotate(DREG(insn, 0), shift, left, 32);
3719 } else {
3720 TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3721 rotate_x_flags(DREG(insn, 0), X, 32);
3722 }
3723
3724 set_cc_op(s, CC_OP_FLAGS);
3725 }
3726
/* Byte rotate with an immediate count (1..8; a 0 field encodes 8). */
DISAS_INSN(rotate8_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 8);
    } else {
        /* Rotate through the X flag. */
        TCGv X = rotate_x(reg, shift, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    /* Only the low byte of the register is written back. */
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3751
/* Word rotate with an immediate count (1..8; a 0 field encodes 8). */
DISAS_INSN(rotate16_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 16);
    } else {
        /* Rotate through the X flag. */
        TCGv X = rotate_x(reg, shift, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    /* Only the low word of the register is written back. */
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3775
/*
 * Long rotate with a register count.  The raw count is taken mod 64;
 * rol/ror then use it mod 32, roxl/roxr mod 33.
 */
DISAS_INSN(rotate_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
3806
/*
 * Byte rotate with a register count.  The raw count is taken mod 64;
 * rol/ror then use it mod 8, roxl/roxr mod 9.
 */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    /* Only the low byte of the register is written back. */
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3838
DISAS_INSN(rotate16_reg)
{
    /* 16-bit ROL/ROR/ROXL/ROXR with the rotate count taken from Dy. */
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);  /* bit 8: 1 = rotate left, 0 = rotate right */

    /* Work on the zero-extended low word of Dx. */
    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* Plain rotate: data rotates modulo 16 ... */
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    /* Write back only the low word; the rest of Dx is preserved. */
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3870
DISAS_INSN(rotate_mem)
{
    /* Memory-form rotate: always a word operand, always by exactly 1. */
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);  /* bit 8: 1 = rotate left, 0 = rotate right */

    SRC_EA(env, src, OS_WORD, 0, &addr);

    shift = tcg_constant_i32(1);
    if (insn & 0x0200) {
        /* Plain rotate (ROL/ROR). */
        rotate(src, shift, left, 16);
    } else {
        /* Rotate through the X flag (ROXL/ROXR). */
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
    }
    /* Store the rotated word back to the same effective address. */
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3890
DISAS_INSN(bfext_reg)
{
    /*
     * BFEXTU/BFEXTS Dn{ofs:len},Dm: extract a bit-field from a data
     * register into another data register, zero- or sign-extended.
     * N/Z come from the sign-extended field (CC_OP_LOGIC on QREG_CC_N).
     */
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;          /* BFEXTS vs BFEXTU */
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;   /* width 0 means 32 */
    int ofs = extract32(ext, 6, 5); /* big bit-endian */
    int pos = 32 - ofs - len; /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /*
     * In general, we're going to rotate the field so that it's at the
     * top of the word and then right-shift by the complement of the
     * width to extend the field.
     */
    if (ext & 0x20) {
        /* Variable width. */
        if (ext & 0x800) {
            /* Variable offset. */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = 32 - width (mod 32); arithmetic shift sign-extends. */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
    } else {
        /* Immediate width. */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /*
             * Immediate offset. If the field doesn't wrap around the
             * end of the word, rely on (s)extract completely.
             */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    set_cc_op(s, CC_OP_LOGIC);
}
3957
DISAS_INSN(bfext_mem)
{
    /*
     * BFEXTU/BFEXTS <mem>{ofs:len},Dn: extract a bit-field from memory.
     * The work is done by helpers; width/offset may each be immediate
     * or come from a data register (ext bits 5 and 11).
     */
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;          /* BFEXTS vs BFEXTU */
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        /* Sign-extended result doubles as the N/Z source. */
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        /* Helper packs value (low 32) and flag word (high 32) in one i64. */
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);
}
3992
DISAS_INSN(bfop_reg)
{
    /*
     * BFCHG/BFCLR/BFSET/BFTST/BFFFO on a data register.
     * QREG_CC_N receives the field rotated to the top of the word (for
     * the flags); 'mask' covers every bit NOT in the field, rotated into
     * the field's position, so AND/OR/EQV with it updates only the field.
     */
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;   /* width 0 means 32 */
    int ofs = extract32(ext, 6, 5); /* big bit-endian */
    TCGv mask, tofs = NULL, tlen = NULL;
    bool is_bfffo = (insn & 0x0f00) == 0x0d00;

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset. */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            /* Field fits without wrap-around: plain shift suffices. */
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);

        mask = tcg_constant_i32(ror32(maski, ofs));
        if (is_bfffo) {
            tofs = tcg_constant_i32(ofs);
            tlen = tcg_constant_i32(len);
        }
    } else {
        TCGv tmp = tcg_temp_new();

        mask = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp);
            if (is_bfffo) {
                tlen = tcg_temp_new();
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1));
            if (is_bfffo) {
                tlen = tcg_constant_i32(len);
            }
        }

        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (is_bfffo) {
                tofs = tmp;
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (is_bfffo) {
                tofs = tcg_constant_i32(ofs);
            }
        }
    }
    set_cc_op(s, CC_OP_LOGIC);

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do. */
        break;
    default:
        g_assert_not_reached();
    }
}
4079
DISAS_INSN(bfop_mem)
{
    /*
     * BFCHG/BFCLR/BFSET/BFTST/BFFFO on a memory operand; each operation
     * has a dedicated helper that also returns the flag word in CC_N.
     */
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width: register (ext bit 5) or 5-bit immediate. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    /* Offset: register (ext bit 11) or 5-bit immediate. */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        /* Helper packs result (low 32) and flag word (high 32) in one i64. */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);
}
4126
DISAS_INSN(bfins_reg)
{
    /*
     * BFINS Dn,Dx{ofs:len}: insert the low 'len' bits of Dn into the
     * named bit-field of Dx.  N/Z are derived from the inserted value
     * shifted to the top of the word (QREG_CC_N, CC_OP_LOGIC).
     */
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;   /* width 0 means 32 */
    int ofs = extract32(ext, 6, 5); /* big bit-endian */
    int pos = 32 - ofs - len; /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit. */
        if (pos >= 0) {
            /* Field doesn't wrap: a single deposit does the insert. */
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Wrapping field: mask the source, rotate it into place,
             * clear the field in dst and OR the pieces together. */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        /* rot = (width + offset) mod 32: rotate mask and value into place. */
        tcg_gen_andi_i32(rot, rot, 31);
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);
    }
}
4196
DISAS_INSN(bfins_mem)
{
    /*
     * BFINS Dn,<mem>{ofs:len}: insert into a bit-field in memory.
     * The helper performs the read-modify-write and returns the flag
     * word in QREG_CC_N.
     */
    int ext = read_im16(env, s);
    TCGv src = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width: register (ext bit 5) or 5-bit immediate. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    /* Offset: register (ext bit 11) or 5-bit immediate. */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
    set_cc_op(s, CC_OP_LOGIC);
}
4223
4224 DISAS_INSN(ff1)
4225 {
4226 TCGv reg;
4227 reg = DREG(insn, 0);
4228 gen_logic_cc(s, reg, OS_LONG);
4229 gen_helper_ff1(reg, reg);
4230 }
4231
DISAS_INSN(chk)
{
    /*
     * CHK <ea>,Dn: bound check Dn against the EA operand; the helper
     * performs the comparison and raises any exception.  Size comes
     * from bits 7-8: 3 = word, 2 = long (long form needs the CHK2
     * feature); everything else is illegal.
     */
    TCGv src, reg;
    int opsize;

    switch ((insn >> 7) & 3) {
    case 3:
        opsize = OS_WORD;
        break;
    case 2:
        if (m68k_feature(env, M68K_FEATURE_CHK2)) {
            opsize = OS_LONG;
            break;
        }
        /* fallthru */
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }
    /* Both operands are sign-extended to 32 bits for the helper. */
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);

    /* Flags must be up to date before a possible exception. */
    gen_flush_flags(s);
    gen_helper_chk(cpu_env, reg, src);
}
4257
DISAS_INSN(chk2)
{
    /*
     * CHK2/CMP2 <ea>,Rn: check Rn against a bound pair loaded from
     * memory (lower bound at the EA, upper bound right after it).
     * The helper does the comparison and raises any exception.
     */
    uint16_t ext;
    TCGv addr1, addr2, bound1, bound2, reg;
    int opsize;

    /* Operand size from bits 9-10 of the opcode. */
    switch ((insn >> 9) & 3) {
    case 0:
        opsize = OS_BYTE;
        break;
    case 1:
        opsize = OS_WORD;
        break;
    case 2:
        opsize = OS_LONG;
        break;
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    /* Ext bit 11 distinguishes CHK2 from CMP2; only CHK2 is valid here. */
    ext = read_im16(env, s);
    if ((ext & 0x0800) == 0) {
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    /* NOTE(review): gen_lea's result is not checked with IS_NULL_QREG
     * here, unlike other handlers - presumably the insn table restricts
     * this to EAs that cannot fail; confirm against the decode table. */
    addr1 = gen_lea(env, s, insn, OS_UNSIZED);
    addr2 = tcg_temp_new();
    tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));

    bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
    bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));

    /* Rn: address register used as-is, data register sign-extended. */
    reg = tcg_temp_new();
    if (ext & 0x8000) {
        tcg_gen_mov_i32(reg, AREG(ext, 12));
    } else {
        gen_ext(reg, DREG(ext, 12), opsize, 1);
    }

    gen_flush_flags(s);
    gen_helper_chk2(cpu_env, reg, bound1, bound2);
}
4302
4303 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4304 {
4305 TCGv addr;
4306 TCGv_i64 t0, t1;
4307
4308 addr = tcg_temp_new();
4309
4310 t0 = tcg_temp_new_i64();
4311 t1 = tcg_temp_new_i64();
4312
4313 tcg_gen_andi_i32(addr, src, ~15);
4314 tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
4315 tcg_gen_addi_i32(addr, addr, 8);
4316 tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);
4317
4318 tcg_gen_andi_i32(addr, dst, ~15);
4319 tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
4320 tcg_gen_addi_i32(addr, addr, 8);
4321 tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
4322 }
4323
4324 DISAS_INSN(move16_reg)
4325 {
4326 int index = IS_USER(s);
4327 TCGv tmp;
4328 uint16_t ext;
4329
4330 ext = read_im16(env, s);
4331 if ((ext & (1 << 15)) == 0) {
4332 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4333 }
4334
4335 m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4336
4337 /* Ax can be Ay, so save Ay before incrementing Ax */
4338 tmp = tcg_temp_new();
4339 tcg_gen_mov_i32(tmp, AREG(ext, 12));
4340 tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4341 tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4342 }
4343
DISAS_INSN(move16_mem)
{
    /*
     * MOVE16 between (Ay) / (Ay)+ and an absolute long address:
     * bit 3 selects the direction, bit 4 (clear) selects the
     * post-increment form.
     */
    int index = IS_USER(s);
    TCGv reg, addr;

    reg = AREG(insn, 0);
    addr = tcg_constant_i32(read_im32(env, s));

    if ((insn >> 3) & 1) {
        /* MOVE16 (xxx).L, (Ay) */
        m68k_copy_line(reg, addr, index);
    } else {
        /* MOVE16 (Ay), (xxx).L */
        m68k_copy_line(addr, reg, index);
    }

    if (((insn >> 3) & 2) == 0) {
        /* (Ay)+ */
        tcg_gen_addi_i32(reg, reg, 16);
    }
}
4365
DISAS_INSN(strldsr)
{
    /*
     * STRLDSR: must be followed by the exact opcode 0x46FC
     * (move #imm,SR); pushes the current SR, then loads SR from the
     * immediate.  Privileged, and the immediate must have SR_S set.
     */
    uint16_t ext;
    uint32_t addr;

    addr = s->pc - 2;   /* address of this insn, for exception reporting */
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_ILLEGAL);
        return;
    }
    ext = read_im16(env, s);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
    /* SR changed: end the TB so the new state takes effect. */
    gen_exit_tb(s);
}
4386
DISAS_INSN(move_from_sr)
{
    /*
     * MOVE SR,<ea>.  Privileged only on CPUs with the
     * MOVEFROMSR_PRIV feature.
     */
    TCGv sr;

    if (IS_USER(s) && m68k_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    DEST_EA(env, insn, OS_WORD, sr, NULL);
}
4398
4399 #if defined(CONFIG_SOFTMMU)
DISAS_INSN(moves)
{
    /*
     * MOVES <ea>,Rn / MOVES Rn,<ea>: privileged move using the
     * SFC/DFC alternate function codes for the memory access.
     * Direction is ext bit 11; Rn is in ext bits 12-15 (address
     * register if bit 15 set, sign-extending on load).
     */
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            gen_ext(reg, tmp, opsize, 1);
        } else {
            gen_partset_reg(opsize, reg, tmp);
        }
    }
    /* gen_lea does not update the address register for (An)+/-(An);
     * finish that here. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement. */
        /* Byte accesses through A7 keep the stack pointer word-aligned. */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement. */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
4457
4458 DISAS_INSN(move_to_sr)
4459 {
4460 if (IS_USER(s)) {
4461 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4462 return;
4463 }
4464 gen_move_to_sr(env, s, insn, false);
4465 gen_exit_tb(s);
4466 }
4467
4468 DISAS_INSN(move_from_usp)
4469 {
4470 if (IS_USER(s)) {
4471 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4472 return;
4473 }
4474 tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
4475 offsetof(CPUM68KState, sp[M68K_USP]));
4476 }
4477
4478 DISAS_INSN(move_to_usp)
4479 {
4480 if (IS_USER(s)) {
4481 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4482 return;
4483 }
4484 tcg_gen_st_i32(AREG(insn, 0), cpu_env,
4485 offsetof(CPUM68KState, sp[M68K_USP]));
4486 }
4487
4488 DISAS_INSN(halt)
4489 {
4490 if (IS_USER(s)) {
4491 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4492 return;
4493 }
4494
4495 gen_exception(s, s->pc, EXCP_HALT_INSN);
4496 }
4497
DISAS_INSN(stop)
{
    /*
     * STOP #imm: privileged.  Load SR from the immediate, mark the
     * CPU halted, and leave via the HLT exception.
     */
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
4513
4514 DISAS_INSN(rte)
4515 {
4516 if (IS_USER(s)) {
4517 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4518 return;
4519 }
4520 gen_exception(s, s->base.pc_next, EXCP_RTE);
4521 }
4522
DISAS_INSN(cf_movec)
{
    /*
     * ColdFire MOVEC Rn,<creg>: privileged, write-only (no read form).
     * The control register number is the low 12 bits of the extension
     * word; Rn is an address register if ext bit 15 is set.
     */
    uint16_t ext;
    TCGv reg;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    gen_helper_cf_movec_to(cpu_env, tcg_constant_i32(ext & 0xfff), reg);
    /* Control state changed: end the TB. */
    gen_exit_tb(s);
}
4543
DISAS_INSN(m68k_movec)
{
    /*
     * 680x0 MOVEC: privileged move between a general register and a
     * control register.  Opcode bit 0 selects the direction
     * (1 = to control register); ext encodes Rn and the creg number.
     */
    uint16_t ext;
    TCGv reg, creg;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    creg = tcg_constant_i32(ext & 0xfff);
    if (insn & 1) {
        gen_helper_m68k_movec_to(cpu_env, creg, reg);
    } else {
        gen_helper_m68k_movec_from(reg, cpu_env, creg);
    }
    /* Control state may have changed: end the TB. */
    gen_exit_tb(s);
}
4569
4570 DISAS_INSN(intouch)
4571 {
4572 if (IS_USER(s)) {
4573 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4574 return;
4575 }
4576 /* ICache fetch. Implement as no-op. */
4577 }
4578
4579 DISAS_INSN(cpushl)
4580 {
4581 if (IS_USER(s)) {
4582 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4583 return;
4584 }
4585 /* Cache push/invalidate. Implement as no-op. */
4586 }
4587
4588 DISAS_INSN(cpush)
4589 {
4590 if (IS_USER(s)) {
4591 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4592 return;
4593 }
4594 /* Cache push/invalidate. Implement as no-op. */
4595 }
4596
4597 DISAS_INSN(cinv)
4598 {
4599 if (IS_USER(s)) {
4600 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4601 return;
4602 }
4603 /* Invalidate cache line. Implement as no-op. */
4604 }
4605
4606 #if defined(CONFIG_SOFTMMU)
DISAS_INSN(pflush)
{
    /*
     * PFLUSH: privileged TLB flush; the opmode (opcode bits 3-4) and
     * the address in An are handed to the helper.
     */
    TCGv opmode;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    opmode = tcg_constant_i32((insn >> 3) & 3);
    gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
}
4619
DISAS_INSN(ptest)
{
    /*
     * PTEST: privileged MMU probe of the address in An; opcode bit 5
     * selects a read (1) or write (0) test.
     */
    TCGv is_read;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    is_read = tcg_constant_i32((insn >> 5) & 1);
    gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
}
4631 #endif
4632
DISAS_INSN(wddata)
{
    /* WDDATA: always raises a privilege violation here. */
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
4637
DISAS_INSN(wdebug)
{
    /* WDEBUG: privileged, and not implemented - aborts emulation. */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug. */
    cpu_abort(env_cpu(env), "WDEBUG not implemented");
}
4647 #endif
4648
4649 DISAS_INSN(trap)
4650 {
4651 gen_exception(s, s->pc, EXCP_TRAP0 + (insn & 0xf));
4652 }
4653
static void do_trapcc(DisasContext *s, DisasCompare *c)
{
    /*
     * Shared tail for TRAPcc/TRAPV: raise a format-2 TRAPCC exception
     * when condition c holds.  TCG_COND_NEVER emits nothing;
     * TCG_COND_ALWAYS traps unconditionally; otherwise the trap is
     * jumped over when the condition is false.
     */
    if (c->tcond != TCG_COND_NEVER) {
        TCGLabel *over = NULL;

        update_cc_op(s);

        if (c->tcond != TCG_COND_ALWAYS) {
            /* Jump over if !c. */
            over = gen_new_label();
            tcg_gen_brcond_i32(tcg_invert_cond(c->tcond), c->v1, c->v2, over);
        }

        /* PC of the next insn; pc_next identifies the trapping insn. */
        tcg_gen_movi_i32(QREG_PC, s->pc);
        gen_raise_exception_format2(s, EXCP_TRAPCC, s->base.pc_next);

        if (over != NULL) {
            /* Fall-through path: translation continues normally. */
            gen_set_label(over);
            s->base.is_jmp = DISAS_NEXT;
        }
    }
}
4676
DISAS_INSN(trapcc)
{
    /*
     * TRAPcc [#imm]: trap when the condition (opcode bits 8-11) holds.
     * The optional word/long immediate is only there to be skipped.
     */
    DisasCompare c;

    /* Consume and discard the immediate operand. */
    switch (extract32(insn, 0, 3)) {
    case 2: /* trapcc.w */
        (void)read_im16(env, s);
        break;
    case 3: /* trapcc.l */
        (void)read_im32(env, s);
        break;
    case 4: /* trapcc (no operand) */
        break;
    default:
        /* trapcc registered with only valid opmodes */
        g_assert_not_reached();
    }

    gen_cc_cond(&c, s, extract32(insn, 8, 4));
    do_trapcc(s, &c);
}
4699
4700 DISAS_INSN(trapv)
4701 {
4702 DisasCompare c;
4703
4704 gen_cc_cond(&c, s, 9); /* V set */
4705 do_trapcc(s, &c);
4706 }
4707
4708 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4709 {
4710 switch (reg) {
4711 case M68K_FPIAR:
4712 tcg_gen_movi_i32(res, 0);
4713 break;
4714 case M68K_FPSR:
4715 tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4716 break;
4717 case M68K_FPCR:
4718 tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4719 break;
4720 }
4721 }
4722
4723 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4724 {
4725 switch (reg) {
4726 case M68K_FPIAR:
4727 break;
4728 case M68K_FPSR:
4729 tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4730 break;
4731 case M68K_FPCR:
4732 gen_helper_set_fpcr(cpu_env, val);
4733 break;
4734 }
4735 }
4736
4737 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4738 {
4739 int index = IS_USER(s);
4740 TCGv tmp;
4741
4742 tmp = tcg_temp_new();
4743 gen_load_fcr(s, tmp, reg);
4744 tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
4745 }
4746
4747 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4748 {
4749 int index = IS_USER(s);
4750 TCGv tmp;
4751
4752 tmp = tcg_temp_new();
4753 tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
4754 gen_store_fcr(s, tmp, reg);
4755 }
4756
4757
/*
 * FMOVE/FMOVEM to or from the FP control registers (FPCR/FPSR/FPIAR).
 * 'mask' selects which control registers move; is_write means "from
 * the FPU to the EA".  Register-direct and immediate forms only allow
 * a single control register; memory forms iterate over the mask.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* Exactly one control register must be selected. */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    case 7: /* Immediate */
        if (REG(insn, 0) == 4) {
            /* Only a load of a single control register is legal. */
            if (is_write ||
                (mask != M68K_FPIAR && mask != M68K_FPSR &&
                 mask != M68K_FPCR)) {
                gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
                return;
            }
            tmp = tcg_constant_i32(read_im32(env, s));
            gen_store_fcr(s, tmp, mask);
            return;
        }
        break;
    default:
        break;
    }

    /* Memory operand: resolve the effective address. */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /*
     * mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* -(An): store the selected registers in descending order,
         * then write the final address back to An. */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        /* All other modes walk upward through memory. */
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        if (mode == 3) {
            /* (An)+: write the incremented address back. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
4853
/*
 * FMOVEM of FP data registers.  The register list is either static
 * (low byte of ext) or dynamic (from a data register, mode bit 0);
 * ext bit 13 selects store (set) vs load.  The helpers return the
 * final address so (An)+/-(An) can update the address register.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE; /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* tmp holds the register-list mask, and later the final address. */
    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /*
         * predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
            }
        }
    }
    /* (An)+ or -(An): write the helper's final address back to An. */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
}
4913
/*
 * ??? FP exceptions are not implemented. Most exceptions are deferred until
 * immediately before the next FP instruction is executed.
 */
/*
 * Main FPU dispatch.  The extension word selects the operation class
 * (bits 13-15): general ops and fmovecr, fmove out, control-register
 * moves, and fmovem are handled here; everything else decodes the
 * opmode field and emits the matching helper.  Most dyadic ops use
 * FPn as both second source and destination.  Unless noted, each op
 * ends with gen_helper_ftst on the destination to set the FP flags.
 */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int opmode;
    int opsize;
    TCGv_ptr cpu_src, cpu_dest;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0:
        break;
    case 1:
        goto undef;
    case 2:
        if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
            /* fmovecr */
            TCGv rom_offset = tcg_constant_i32(opmode);
            cpu_dest = gen_fp_ptr(REG(ext, 7));
            gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
            return;
        }
        break;
    case 3: /* fmove out */
        cpu_src = gen_fp_ptr(REG(ext, 7));
        opsize = ext_opsize(ext, 10);
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_STORE, IS_USER(s)) == -1) {
            gen_addr_fault(s);
        }
        gen_helper_ftst(cpu_env, cpu_src);
        return;
    case 4: /* fmove to control register. */
    case 5: /* fmove from control register. */
        gen_op_fmove_fcr(env, s, insn, ext);
        return;
    case 6: /* fmovem */
    case 7:
        if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
            goto undef;
        }
        gen_op_fmovem(env, s, insn, ext);
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address. */
        opsize = ext_opsize(ext, 10);
        cpu_src = gen_fp_result_ptr();
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_LOADS, IS_USER(s)) == -1) {
            gen_addr_fault(s);
            return;
        }
    } else {
        /* Source register. */
        opsize = OS_EXTENDED;
        cpu_src = gen_fp_ptr(REG(ext, 10));
    }
    cpu_dest = gen_fp_ptr(REG(ext, 7));
    switch (opmode) {
    case 0: /* fmove */
        gen_fp_move(cpu_dest, cpu_src);
        break;
    case 0x40: /* fsmove */
        gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x44: /* fdmove */
        gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
        break;
    case 1: /* fint */
        gen_helper_firound(cpu_env, cpu_dest, cpu_src);
        break;
    case 2: /* fsinh */
        gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
        break;
    case 3: /* fintrz */
        gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
        break;
    case 4: /* fsqrt */
        gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x41: /* fssqrt */
        gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x45: /* fdsqrt */
        gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x06: /* flognp1 */
        gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x08: /* fetoxm1 */
        gen_helper_fetoxm1(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x09: /* ftanh */
        gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0a: /* fatan */
        gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0c: /* fasin */
        gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0d: /* fatanh */
        gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0e: /* fsin */
        gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0f: /* ftan */
        gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x10: /* fetox */
        gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x11: /* ftwotox */
        gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x12: /* ftentox */
        gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x14: /* flogn */
        gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x15: /* flog10 */
        gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x16: /* flog2 */
        gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x18: /* fabs */
        gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x58: /* fsabs */
        gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5c: /* fdabs */
        gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x19: /* fcosh */
        gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1a: /* fneg */
        gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5a: /* fsneg */
        gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5e: /* fdneg */
        gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1c: /* facos */
        gen_helper_facos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1d: /* fcos */
        gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1e: /* fgetexp */
        gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1f: /* fgetman */
        gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x20: /* fdiv */
        gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x60: /* fsdiv */
        gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x64: /* fddiv */
        gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x21: /* fmod */
        gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x22: /* fadd */
        gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x62: /* fsadd */
        gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x66: /* fdadd */
        gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x23: /* fmul */
        gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x63: /* fsmul */
        gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x67: /* fdmul */
        gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x24: /* fsgldiv */
        gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x25: /* frem */
        gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x26: /* fscale */
        gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x27: /* fsglmul */
        gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x28: /* fsub */
        gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x68: /* fssub */
        gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x6c: /* fdsub */
        gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x30: case 0x31: case 0x32:
    case 0x33: case 0x34: case 0x35:
    case 0x36: case 0x37: {
            /* fsincos: low 3 opmode bits name the cosine destination. */
            TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
            gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
        }
        break;
    case 0x38: /* fcmp */
        gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
        return;
    case 0x3a: /* ftst */
        gen_helper_ftst(cpu_env, cpu_src);
        return;
    default:
        goto undef;
    }
    gen_helper_ftst(cpu_env, cpu_dest);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes? */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
5154
/*
 * Fill in *c with a TCG comparison (tcond, v1, v2) that is true when the
 * FPU conditional predicate 'cond' (6 bits, as passed by fbcc/fscc/ftrapcc)
 * holds.  Predicates 0-15 are the IEEE-nonaware forms and 16-31 the aware
 * forms; both halves map onto the same FPSR condition-code tests here.
 * The tests are built from the FPSR A (NaN), Z, N and I flags.
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;

    c->v2 = tcg_constant_i32(0);
    /* TODO: Raise BSUN exception. */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->v1 = c->v2;
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr,
                         FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_EQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        /* Shift the A bit up into the N bit position, fold it into N,
           then invert N so the whole test becomes a simple != 0. */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        /* Invert N first so the condition reduces to "all three clear". */
        c->v1 = tcg_temp_new();
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        /* Shift A into the N position and use it to mask N off (andc),
           implementing N && !A without a branch. */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_EQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_NE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        /* Same folding trick as case 3, but with Z shifted into N. */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        /* Same andc trick as case 5, with Z in the masking role. */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->v1 = c->v2;
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
}
5277
5278 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5279 {
5280 DisasCompare c;
5281
5282 gen_fcc_cond(&c, s, cond);
5283 update_cc_op(s);
5284 tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5285 }
5286
5287 DISAS_INSN(fbcc)
5288 {
5289 uint32_t offset;
5290 uint32_t base;
5291 TCGLabel *l1;
5292
5293 base = s->pc;
5294 offset = (int16_t)read_im16(env, s);
5295 if (insn & (1 << 6)) {
5296 offset = (offset << 16) | read_im16(env, s);
5297 }
5298
5299 l1 = gen_new_label();
5300 update_cc_op(s);
5301 gen_fjmpcc(s, insn & 0x3f, l1);
5302 gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
5303 gen_set_label(l1);
5304 gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
5305 }
5306
5307 DISAS_INSN(fscc)
5308 {
5309 DisasCompare c;
5310 int cond;
5311 TCGv tmp;
5312 uint16_t ext;
5313
5314 ext = read_im16(env, s);
5315 cond = ext & 0x3f;
5316 gen_fcc_cond(&c, s, cond);
5317
5318 tmp = tcg_temp_new();
5319 tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
5320
5321 tcg_gen_neg_i32(tmp, tmp);
5322 DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5323 }
5324
5325 DISAS_INSN(ftrapcc)
5326 {
5327 DisasCompare c;
5328 uint16_t ext;
5329 int cond;
5330
5331 ext = read_im16(env, s);
5332 cond = ext & 0x3f;
5333
5334 /* Consume and discard the immediate operand. */
5335 switch (extract32(insn, 0, 3)) {
5336 case 2: /* ftrapcc.w */
5337 (void)read_im16(env, s);
5338 break;
5339 case 3: /* ftrapcc.l */
5340 (void)read_im32(env, s);
5341 break;
5342 case 4: /* ftrapcc (no operand) */
5343 break;
5344 default:
5345 /* ftrapcc registered with only valid opmodes */
5346 g_assert_not_reached();
5347 }
5348
5349 gen_fcc_cond(&c, s, cond);
5350 do_trapcc(s, &c);
5351 }
5352
5353 #if defined(CONFIG_SOFTMMU)
5354 DISAS_INSN(frestore)
5355 {
5356 TCGv addr;
5357
5358 if (IS_USER(s)) {
5359 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5360 return;
5361 }
5362 if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5363 SRC_EA(env, addr, OS_LONG, 0, NULL);
5364 /* FIXME: check the state frame */
5365 } else {
5366 disas_undef(env, s, insn);
5367 }
5368 }
5369
5370 DISAS_INSN(fsave)
5371 {
5372 if (IS_USER(s)) {
5373 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5374 return;
5375 }
5376
5377 if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5378 /* always write IDLE */
5379 TCGv idle = tcg_constant_i32(0x41000000);
5380 DEST_EA(env, insn, OS_LONG, idle, NULL);
5381 } else {
5382 disas_undef(env, s, insn);
5383 }
5384 }
5385 #endif
5386
5387 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5388 {
5389 TCGv tmp = tcg_temp_new();
5390 if (s->env->macsr & MACSR_FI) {
5391 if (upper)
5392 tcg_gen_andi_i32(tmp, val, 0xffff0000);
5393 else
5394 tcg_gen_shli_i32(tmp, val, 16);
5395 } else if (s->env->macsr & MACSR_SU) {
5396 if (upper)
5397 tcg_gen_sari_i32(tmp, val, 16);
5398 else
5399 tcg_gen_ext16s_i32(tmp, val);
5400 } else {
5401 if (upper)
5402 tcg_gen_shri_i32(tmp, val, 16);
5403 else
5404 tcg_gen_ext16u_i32(tmp, val);
5405 }
5406 return tmp;
5407 }
5408
/* Clear the MACSR result flags (V, Z, N, EV) before a new MAC operation. */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
5414
/*
 * ColdFire MAC/EMAC multiply-accumulate (optionally with a parallel
 * memory load and register writeback).  The extension word selects the
 * operand registers, word/long operand size, shift amount and — for the
 * EMAC-B dual form — a second accumulator.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Lazily allocate the 64-bit product temp shared by MAC insns in
       this TB. */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    /* Accumulator index: bit 7 of the opcode plus bit 4 of the ext word. */
    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    /* Dual-accumulate form: load variant with a second acc selected. */
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load. */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /*
         * Load the value now to ensure correct exception behavior.
         * Perform writeback after reading the MAC inputs.
         */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word. */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Apply the scale-factor field: 01 = shift left, 11 = shift right. */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply. */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* Bit 8 distinguishes MSAC (subtract) from MAC (add). */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant. */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier. */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated. */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(acc));

    if (insn & 0x30) {
        /* Writeback for the load variant: loaded value and address reg. */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /*
         * FIXME: Should address writeback happen with the masked or
         * unmasked value?
         */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment. */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
5582
5583 DISAS_INSN(from_mac)
5584 {
5585 TCGv rx;
5586 TCGv_i64 acc;
5587 int accnum;
5588
5589 rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5590 accnum = (insn >> 9) & 3;
5591 acc = MACREG(accnum);
5592 if (s->env->macsr & MACSR_FI) {
5593 gen_helper_get_macf(rx, cpu_env, acc);
5594 } else if ((s->env->macsr & MACSR_OMC) == 0) {
5595 tcg_gen_extrl_i64_i32(rx, acc);
5596 } else if (s->env->macsr & MACSR_SU) {
5597 gen_helper_get_macs(rx, acc);
5598 } else {
5599 gen_helper_get_macu(rx, acc);
5600 }
5601 if (insn & 0x40) {
5602 tcg_gen_movi_i64(acc, 0);
5603 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5604 }
5605 }
5606
5607 DISAS_INSN(move_mac)
5608 {
5609 /* FIXME: This can be done without a helper. */
5610 int src;
5611 TCGv dest;
5612 src = insn & 3;
5613 dest = tcg_constant_i32((insn >> 9) & 3);
5614 gen_helper_mac_move(cpu_env, dest, tcg_constant_i32(src));
5615 gen_mac_clear_flags();
5616 gen_helper_mac_set_flags(cpu_env, dest);
5617 }
5618
5619 DISAS_INSN(from_macsr)
5620 {
5621 TCGv reg;
5622
5623 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5624 tcg_gen_mov_i32(reg, QREG_MACSR);
5625 }
5626
5627 DISAS_INSN(from_mask)
5628 {
5629 TCGv reg;
5630 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5631 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5632 }
5633
5634 DISAS_INSN(from_mext)
5635 {
5636 TCGv reg;
5637 TCGv acc;
5638 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5639 acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5640 if (s->env->macsr & MACSR_FI)
5641 gen_helper_get_mac_extf(reg, cpu_env, acc);
5642 else
5643 gen_helper_get_mac_exti(reg, cpu_env, acc);
5644 }
5645
5646 DISAS_INSN(macsr_to_ccr)
5647 {
5648 TCGv tmp = tcg_temp_new();
5649
5650 /* Note that X and C are always cleared. */
5651 tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
5652 gen_helper_set_ccr(cpu_env, tmp);
5653 set_cc_op(s, CC_OP_FLAGS);
5654 }
5655
5656 DISAS_INSN(to_mac)
5657 {
5658 TCGv_i64 acc;
5659 TCGv val;
5660 int accnum;
5661 accnum = (insn >> 9) & 3;
5662 acc = MACREG(accnum);
5663 SRC_EA(env, val, OS_LONG, 0, NULL);
5664 if (s->env->macsr & MACSR_FI) {
5665 tcg_gen_ext_i32_i64(acc, val);
5666 tcg_gen_shli_i64(acc, acc, 8);
5667 } else if (s->env->macsr & MACSR_SU) {
5668 tcg_gen_ext_i32_i64(acc, val);
5669 } else {
5670 tcg_gen_extu_i32_i64(acc, val);
5671 }
5672 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5673 gen_mac_clear_flags();
5674 gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(accnum));
5675 }
5676
5677 DISAS_INSN(to_macsr)
5678 {
5679 TCGv val;
5680 SRC_EA(env, val, OS_LONG, 0, NULL);
5681 gen_helper_set_macsr(cpu_env, val);
5682 gen_exit_tb(s);
5683 }
5684
5685 DISAS_INSN(to_mask)
5686 {
5687 TCGv val;
5688 SRC_EA(env, val, OS_LONG, 0, NULL);
5689 tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5690 }
5691
5692 DISAS_INSN(to_mext)
5693 {
5694 TCGv val;
5695 TCGv acc;
5696 SRC_EA(env, val, OS_LONG, 0, NULL);
5697 acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5698 if (s->env->macsr & MACSR_FI)
5699 gen_helper_set_mac_extf(cpu_env, val, acc);
5700 else if (s->env->macsr & MACSR_SU)
5701 gen_helper_set_mac_exts(cpu_env, val, acc);
5702 else
5703 gen_helper_set_mac_extu(cpu_env, val, acc);
5704 }
5705
5706 static disas_proc opcode_table[65536];
5707
5708 static void
5709 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5710 {
5711 int i;
5712 int from;
5713 int to;
5714
5715 /* Sanity check. All set bits must be included in the mask. */
5716 if (opcode & ~mask) {
5717 fprintf(stderr,
5718 "qemu internal error: bogus opcode definition %04x/%04x\n",
5719 opcode, mask);
5720 abort();
5721 }
5722 /*
5723 * This could probably be cleverer. For now just optimize the case where
5724 * the top bits are known.
5725 */
5726 /* Find the first zero bit in the mask. */
5727 i = 0x8000;
5728 while ((i & mask) != 0)
5729 i >>= 1;
5730 /* Iterate over all combinations of this and lower bits. */
5731 if (i == 0)
5732 i = 1;
5733 else
5734 i <<= 1;
5735 from = opcode & ~(i - 1);
5736 to = from + i;
5737 for (i = from; i < to; i++) {
5738 if ((i & mask) == opcode)
5739 opcode_table[i] = proc;
5740 }
5741 }
5742
5743 /*
5744 * Register m68k opcode handlers. Order is important.
5745 * Later insn override earlier ones.
5746 */
void register_m68k_insns (CPUM68KState *env)
{
    /*
     * Build the opcode table only once to avoid
     * multithreading issues.
     */
    if (opcode_table[0] != NULL) {
        return;
    }

    /*
     * use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    /* Default every opcode to 'undef'; later entries override earlier. */
    BASE(undef, 0000, 0000);
    INSN(arith_im, 0080, fff8, CF_ISA_A);
    INSN(arith_im, 0000, ff00, M68K);
    INSN(chk2, 00c0, f9c0, CHK2);
    INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep, 0108, f138, MOVEP);
    INSN(arith_im, 0280, fff8, CF_ISA_A);
    INSN(arith_im, 0200, ff00, M68K);
    INSN(undef, 02c0, ffc0, M68K);
    INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0480, fff8, CF_ISA_A);
    INSN(arith_im, 0400, ff00, M68K);
    INSN(undef, 04c0, ffc0, M68K);
    INSN(arith_im, 0600, ff00, M68K);
    INSN(undef, 06c0, ffc0, M68K);
    INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0680, fff8, CF_ISA_A);
    INSN(arith_im, 0c00, ff38, CF_ISA_A);
    INSN(arith_im, 0c00, ff00, M68K);
    BASE(bitop_im, 0800, ffc0);
    BASE(bitop_im, 0840, ffc0);
    BASE(bitop_im, 0880, ffc0);
    BASE(bitop_im, 08c0, ffc0);
    INSN(arith_im, 0a80, fff8, CF_ISA_A);
    INSN(arith_im, 0a00, ff00, M68K);
#if defined(CONFIG_SOFTMMU)
    INSN(moves, 0e00, ff00, M68K);
#endif
    INSN(cas, 0ac0, ffc0, CAS);
    INSN(cas, 0cc0, ffc0, CAS);
    INSN(cas, 0ec0, ffc0, CAS);
    INSN(cas2w, 0cfc, ffff, CAS);
    INSN(cas2l, 0efc, ffff, CAS);
    BASE(move, 1000, f000);
    BASE(move, 2000, f000);
    BASE(move, 3000, f000);
    INSN(chk, 4000, f040, M68K);
    INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
    INSN(negx, 4080, fff8, CF_ISA_A);
    INSN(negx, 4000, ff00, M68K);
    INSN(undef, 40c0, ffc0, M68K);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68K);
    BASE(lea, 41c0, f1c0);
    BASE(clr, 4200, ff00);
    BASE(undef, 42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68K);
    INSN(neg, 4480, fff8, CF_ISA_A);
    INSN(neg, 4400, ff00, M68K);
    INSN(undef, 44c0, ffc0, M68K);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not, 4680, fff8, CF_ISA_A);
    INSN(not, 4600, ff00, M68K);
#if defined(CONFIG_SOFTMMU)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd, 4800, ffc0, M68K);
    INSN(linkl, 4808, fff8, M68K);
    BASE(pea, 4840, ffc0);
    BASE(swap, 4840, fff8);
    INSN(bkpt, 4848, fff8, BKPT);
    INSN(movem, 48d0, fbf8, CF_ISA_A);
    INSN(movem, 48e8, fbf8, CF_ISA_A);
    INSN(movem, 4880, fb80, M68K);
    BASE(ext, 4880, fff8);
    BASE(ext, 48c0, fff8);
    BASE(ext, 49c0, fff8);
    BASE(tst, 4a00, ff00);
    INSN(tas, 4ac0, ffc0, CF_ISA_B);
    INSN(tas, 4ac0, ffc0, M68K);
#if defined(CONFIG_SOFTMMU)
    INSN(halt, 4ac8, ffff, CF_ISA_A);
    INSN(halt, 4ac8, ffff, M68K);
#endif
    INSN(pulse, 4acc, ffff, CF_ISA_A);
    BASE(illegal, 4afc, ffff);
    INSN(mull, 4c00, ffc0, CF_ISA_A);
    INSN(mull, 4c00, ffc0, LONG_MULDIV);
    INSN(divl, 4c40, ffc0, CF_ISA_A);
    INSN(divl, 4c40, ffc0, LONG_MULDIV);
    INSN(sats, 4c80, fff8, CF_ISA_B);
    BASE(trap, 4e40, fff0);
    BASE(link, 4e50, fff8);
    BASE(unlk, 4e58, fff8);
#if defined(CONFIG_SOFTMMU)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset, 4e70, ffff, M68K);
    BASE(stop, 4e72, ffff);
    BASE(rte, 4e73, ffff);
    INSN(cf_movec, 4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, MOVEC);
#endif
    BASE(nop, 4e71, ffff);
    INSN(rtd, 4e74, ffff, RTD);
    BASE(rts, 4e75, ffff);
    INSN(trapv, 4e76, ffff, M68K);
    INSN(rtr, 4e77, ffff, M68K);
    BASE(jump, 4e80, ffc0);
    BASE(jump, 4ec0, ffc0);
    INSN(addsubq, 5000, f080, M68K);
    BASE(addsubq, 5080, f0c0);
    INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
    INSN(scc, 50c0, f0c0, M68K);     /* Scc.B <EA> */
    INSN(dbcc, 50c8, f0f8, M68K);
    INSN(trapcc, 50fa, f0fe, TRAPCC); /* opmode 010, 011 */
    INSN(trapcc, 50fc, f0ff, TRAPCC); /* opmode 100 */
    INSN(trapcc, 51fa, fffe, CF_ISA_A); /* TPF (trapf) opmode 010, 011 */
    INSN(trapcc, 51fc, ffff, CF_ISA_A); /* TPF (trapf) opmode 100 */

    /* Branch instructions. */
    BASE(branch, 6000, f000);
    /* Disable long branch instructions, then add back the ones we want. */
    BASE(undef, 60ff, f0ff); /* All long branches.  */
    INSN(branch, 60ff, f0ff, CF_ISA_B);
    INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch, 60ff, ffff, BRAL);
    INSN(branch, 60ff, f0ff, BCCL);

    BASE(moveq, 7000, f100);
    INSN(mvzs, 7100, f100, CF_ISA_B);
    BASE(or, 8000, f000);
    BASE(divw, 80c0, f0c0);
    INSN(sbcd_reg, 8100, f1f8, M68K);
    INSN(sbcd_mem, 8108, f1f8, M68K);
    BASE(addsub, 9000, f000);
    INSN(undef, 90c0, f0c0, CF_ISA_A);
    INSN(subx_reg, 9180, f1f8, CF_ISA_A);
    INSN(subx_reg, 9100, f138, M68K);
    INSN(subx_mem, 9108, f138, M68K);
    INSN(suba, 91c0, f1c0, CF_ISA_A);
    INSN(suba, 90c0, f0c0, M68K);

    /* Line A: ColdFire MAC/EMAC unit. */
    BASE(undef_mac, a000, f000);
    INSN(mac, a000, f100, CF_EMAC);
    INSN(from_mac, a180, f9b0, CF_EMAC);
    INSN(move_mac, a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac, a100, f9c0, CF_EMAC);
    INSN(to_macsr, a900, ffc0, CF_EMAC);
    INSN(to_mext, ab00, fbc0, CF_EMAC);
    INSN(to_mask, ad00, ffc0, CF_EMAC);

    INSN(mov3q, a140, f1c0, CF_ISA_B);
    INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp, b080, f1c0, CF_ISA_A);
    INSN(cmpa, b1c0, f1c0, CF_ISA_A);
    INSN(cmp, b000, f100, M68K);
    INSN(eor, b100, f100, M68K);
    INSN(cmpm, b108, f138, M68K);
    INSN(cmpa, b0c0, f0c0, M68K);
    INSN(eor, b180, f1c0, CF_ISA_A);
    BASE(and, c000, f000);
    INSN(exg_dd, c140, f1f8, M68K);
    INSN(exg_aa, c148, f1f8, M68K);
    INSN(exg_da, c188, f1f8, M68K);
    BASE(mulw, c0c0, f0c0);
    INSN(abcd_reg, c100, f1f8, M68K);
    INSN(abcd_mem, c108, f1f8, M68K);
    BASE(addsub, d000, f000);
    INSN(undef, d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg, d180, f1f8, CF_ISA_A);
    INSN(addx_reg, d100, f138, M68K);
    INSN(addx_mem, d108, f138, M68K);
    INSN(adda, d1c0, f1c0, CF_ISA_A);
    INSN(adda, d0c0, f0c0, M68K);
    INSN(shift_im, e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68K);
    INSN(shift16_im, e040, f0f0, M68K);
    INSN(shift_im, e080, f0f0, M68K);
    INSN(shift8_reg, e020, f0f0, M68K);
    INSN(shift16_reg, e060, f0f0, M68K);
    INSN(shift_reg, e0a0, f0f0, M68K);
    INSN(shift_mem, e0c0, fcc0, M68K);
    INSN(rotate_im, e090, f0f0, M68K);
    INSN(rotate8_im, e010, f0f0, M68K);
    INSN(rotate16_im, e050, f0f0, M68K);
    INSN(rotate_reg, e0b0, f0f0, M68K);
    INSN(rotate8_reg, e030, f0f0, M68K);
    INSN(rotate16_reg, e070, f0f0, M68K);
    INSN(rotate_mem, e4c0, fcc0, M68K);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
    /* Line F: FPU and (softmmu) system control instructions. */
    BASE(undef_fpu, f000, f000);
    INSN(fpu, f200, ffc0, CF_FPU);
    INSN(fbcc, f280, ffc0, CF_FPU);
    INSN(fpu, f200, ffc0, FPU);
    INSN(fscc, f240, ffc0, FPU);
    INSN(ftrapcc, f27a, fffe, FPU); /* opmode 010, 011 */
    INSN(ftrapcc, f27c, ffff, FPU); /* opmode 100 */
    INSN(fbcc, f280, ff80, FPU);
#if defined(CONFIG_SOFTMMU)
    INSN(frestore, f340, ffc0, CF_FPU);
    INSN(fsave, f300, ffc0, CF_FPU);
    INSN(frestore, f340, ffc0, FPU);
    INSN(fsave, f300, ffc0, FPU);
    INSN(intouch, f340, ffc0, CF_ISA_A);
    INSN(cpushl, f428, ff38, CF_ISA_A);
    INSN(cpush, f420, ff20, M68040);
    INSN(cinv, f400, ff20, M68040);
    INSN(pflush, f500, ffe0, M68040);
    INSN(ptest, f548, ffd8, M68040);
    INSN(wddata, fb00, ff00, CF_ISA_A);
    INSN(wdebug, fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
5999
/* Per-TB translator setup: seed the DisasContext from current CPU state. */
static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu->env_ptr;

    dc->env = env;
    dc->pc = dc->base.pc_first;
    /* This value will always be filled in properly before m68k_tr_tb_stop. */
    dc->pc_prev = 0xdeadbeef;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_synced = 1;
    dc->done_mac = 0;
    dc->writeback_mask = 0;

    dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
    /* If architectural single step active, limit to 1 */
    if (dc->ss_active) {
        dc->base.max_insns = 1;
    }
}
6020
static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    /* Nothing to do at the start of a TB for m68k. */
}
6024
/* Emit the per-insn start marker carrying pc_next and the current cc_op. */
static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
6030
/*
 * Translate one instruction: dispatch via opcode_table, flush any
 * pending EA writebacks, and decide whether the TB must end before
 * the next insn could cross a page boundary.
 */
static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu->env_ptr;
    uint16_t insn = read_im16(env, dc);

    opcode_table[insn](env, dc, insn);
    do_writebacks(dc);

    dc->pc_prev = dc->base.pc_next;
    dc->base.pc_next = dc->pc;

    if (dc->base.is_jmp == DISAS_NEXT) {
        /*
         * Stop translation when the next insn might touch a new page.
         * This ensures that prefetch aborts at the right place.
         *
         * We cannot determine the size of the next insn without
         * completely decoding it.  However, the maximum insn size
         * is 32 bytes, so end if we do not have that much remaining.
         * This may produce several small TBs at the end of each page,
         * but they will all be linked with goto_tb.
         *
         * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
         * smaller than MC68020's.
         */
        target_ulong start_page_offset
            = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);

        if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6065
/* Emit the TB epilogue appropriate to how translation stopped. */
static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* The insn already ended the TB (exception, trap, ...). */
        break;
    case DISAS_TOO_MANY:
        /* Fell off the end of the TB: chain to the next sequential insn. */
        update_cc_op(dc);
        gen_jmp_tb(dc, 0, dc->pc, dc->pc_prev);
        break;
    case DISAS_JUMP:
        /* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */
        if (dc->ss_active) {
            gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    case DISAS_EXIT:
        /*
         * We updated CC_OP and PC in gen_exit_tb, but also modified
         * other state that may require returning to the main loop.
         */
        if (dc->ss_active) {
            gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6100
/* Log hook: disassemble the guest instructions of this TB. */
static void m68k_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
6107
/* Hooks handed to the generic translator_loop. */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start           = m68k_tr_tb_start,
    .insn_start         = m68k_tr_insn_start,
    .translate_insn     = m68k_tr_translate_insn,
    .tb_stop            = m68k_tr_tb_stop,
    .disas_log          = m68k_tr_disas_log,
};
6116
/*
 * Entry point for translating one m68k translation block: drive the
 * generic translator loop with the m68k-specific hooks above, using a
 * stack-allocated DisasContext (only dc.base is passed; translator_loop
 * fills it in and the hooks recover the full context via container_of).
 */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
}
6123
6124 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6125 {
6126 floatx80 a = { .high = high, .low = low };
6127 union {
6128 float64 f64;
6129 double d;
6130 } u;
6131
6132 u.f64 = floatx80_to_float64(a, &env->fp_status);
6133 return u.d;
6134 }
6135
/*
 * Dump the architectural CPU state to @f (QEMU's CPU-state dump hook,
 * used by e.g. the monitor and -d cpu logging): integer, address and FP
 * registers, PC, SR with decoded flags, FPSR/FPCR, and — in system-mode
 * builds — the banked stack pointers and MMU registers.
 */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    /* One row per register index: Dn, An, and Fn (raw hex plus a
       floatx80_to_double() rendering of the 80-bit value). */
    for (i = 0; i < 8; i++) {
        qemu_fprintf(f, "D%d = %08x A%d = %08x "
                     "F%d = %04x %016"PRIx64" (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                     floatx80_to_double(env, env->fregs[i].l.upper,
                                        env->fregs[i].l.lower));
    }
    qemu_fprintf(f, "PC = %08x ", env->pc);
    /* env->sr holds only the non-CC bits; merge in the lazily-computed
       condition codes before decoding. */
    sr = env->sr | cpu_m68k_get_ccr(env);
    /* Decode: trace mode, interrupt level, supervisor/user, master ('%')
       vs interrupt ('I') stack, then the XNZVC condition codes. */
    qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                 (sr & CCF_C) ? 'C' : '-');
    /* FP status: A/I/Z/N condition-code bits. */
    qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    qemu_fprintf(f, "\n "
                 "FPCR = %04x ", env->fpcr);
    /* FP control: rounding precision (extended/single/double)... */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        qemu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        qemu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        qemu_fprintf(f, "D ");
        break;
    }
    /* ...and rounding mode (nearest/zero/minus-inf/plus-inf). */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        qemu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        qemu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        qemu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        qemu_fprintf(f, "RP ");
        break;
    }
    qemu_fprintf(f, "\n");
#ifdef CONFIG_SOFTMMU
    /* System-mode only: the three banked A7 stack pointers (the arrow
       marks the currently active one), plus supervisor/MMU registers. */
    qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                 env->current_sp == M68K_SSP ? "->" : " ", env->sp[M68K_SSP],
                 env->current_sp == M68K_USP ? "->" : " ", env->sp[M68K_USP],
                 env->current_sp == M68K_ISP ? "->" : " ", env->sp[M68K_ISP]);
    qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                 env->mmu.mmusr, env->mmu.ar);
#endif
}