]> git.proxmox.com Git - mirror_qemu.git/blob - target/m68k/translate.c
Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging
[mirror_qemu.git] / target / m68k / translate.c
1 /*
2 * m68k translation
3 *
4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/translator.h"
29
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32
33 #include "exec/log.h"
34 #include "fpu/softfloat.h"
35
36 #define HELPER_H "helper.h"
37 #include "exec/helper-info.c.inc"
38 #undef HELPER_H
39
//#define DEBUG_DISPATCH 1

/*
 * Expand qregs.h.inc into one static TCG handle per fixed CPU-state
 * field; the handles are initialised in m68k_tcg_init() below.
 */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

/* Backing storage for the "D0".."D7", "A0".."A7", "ACC0".."ACC3" names. */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];      /* data registers D0-D7 */
static TCGv cpu_aregs[8];      /* address registers A0-A7 */
static TCGv_i64 cpu_macc[4];   /* MAC accumulators */

/* Extract a 3-bit register number from INSN starting at bit POS. */
#define REG(insn, pos) (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
/* Address registers go through get_areg() so pending writebacks are seen. */
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP get_areg(s, 7)

/* Sentinel value used to signal an invalid/unavailable effective address. */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes. */
static TCGv store_dummy;
66
/* Create the fixed TCG globals mirroring the m68k CPU state.  Run once. */
void m68k_tcg_init(void)
{
    char *p;
    int i;

#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

    /*
     * halted and exception_index live in the parent CPUState, which sits
     * before env inside M68kCPU -- hence the negative offset from env.
     */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /*
     * Carve register names out of cpu_reg_names: "Dn\0"/"An\0" take
     * 3 bytes each, "ACCn\0" takes 5 -- matching the buffer's size.
     */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                             offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /*
     * Deliberately bogus negative offsets: these TCG values are only
     * compared against (IS_NULL_QREG / store_dummy return values),
     * never loaded or stored through.
     */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
111
/* internal defines */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc;        /* address of the next insn word to fetch */
    target_ulong pc_prev;
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;       /* nonzero if env->cc_op matches cc_op */
    TCGv_i64 mactmp;        /* presumably scratch for MAC insns -- used elsewhere */
    int done_mac;
    int writeback_mask;     /* bitmask of aregs with a pending writeback */
    TCGv writeback[8];      /* staged new values; see delay_set_areg() */
    bool ss_active;         /* NOTE(review): looks like single-step trace flag -- confirm */
} DisasContext;
126
127 static TCGv get_areg(DisasContext *s, unsigned regno)
128 {
129 if (s->writeback_mask & (1 << regno)) {
130 return s->writeback[regno];
131 } else {
132 return cpu_aregs[regno];
133 }
134 }
135
136 static void delay_set_areg(DisasContext *s, unsigned regno,
137 TCGv val, bool give_temp)
138 {
139 if (s->writeback_mask & (1 << regno)) {
140 if (give_temp) {
141 s->writeback[regno] = val;
142 } else {
143 tcg_gen_mov_i32(s->writeback[regno], val);
144 }
145 } else {
146 s->writeback_mask |= 1 << regno;
147 if (give_temp) {
148 s->writeback[regno] = val;
149 } else {
150 TCGv tmp = tcg_temp_new();
151 s->writeback[regno] = tmp;
152 tcg_gen_mov_i32(tmp, val);
153 }
154 }
155 }
156
157 static void do_writebacks(DisasContext *s)
158 {
159 unsigned mask = s->writeback_mask;
160 if (mask) {
161 s->writeback_mask = 0;
162 do {
163 unsigned regno = ctz32(mask);
164 tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
165 mask &= mask - 1;
166 } while (mask);
167 }
168 }
169
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
/* Pick the MMU index from the SFC/DFC supervisor flag in tb->flags. */
#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#endif

/* Signature shared by every insn-specific disassembly routine. */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
/* Debug build: log each dispatch, then forward to the real handler. */
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,  \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,       \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,  \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,       \
                             uint16_t insn)
#endif
203
/*
 * For each CC_OP, the set of flag registers (QREG_CC_*) holding live
 * data; set_cc_op() discards whatever becomes dead on a transition.
 * X and N are live under every op and thus never discarded.
 */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
212
213 static void set_cc_op(DisasContext *s, CCOp op)
214 {
215 CCOp old_op = s->cc_op;
216 int dead;
217
218 if (old_op == op) {
219 return;
220 }
221 s->cc_op = op;
222 s->cc_op_synced = 0;
223
224 /*
225 * Discard CC computation that will no longer be used.
226 * Note that X and N are never dead.
227 */
228 dead = cc_op_live[old_op] & ~cc_op_live[op];
229 if (dead & CCF_C) {
230 tcg_gen_discard_i32(QREG_CC_C);
231 }
232 if (dead & CCF_Z) {
233 tcg_gen_discard_i32(QREG_CC_Z);
234 }
235 if (dead & CCF_V) {
236 tcg_gen_discard_i32(QREG_CC_V);
237 }
238 }
239
240 /* Update the CPU env CC_OP state. */
241 static void update_cc_op(DisasContext *s)
242 {
243 if (!s->cc_op_synced) {
244 s->cc_op_synced = 1;
245 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
246 }
247 }
248
/* Generate a jump to an immediate address. */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    /* Sync the lazy cc_op before leaving the TB. */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
256
/* Generate a jump to the address in qreg DEST. */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    /* Sync the lazy cc_op before leaving the TB. */
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
264
/* Emit a call to the raise_exception helper with exception number NR. */
static void gen_raise_exception(int nr)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(nr));
}
269
/* Raise exception NR, recording THIS_PC for a Format $2 stack frame. */
static void gen_raise_exception_format2(DisasContext *s, int nr,
                                        target_ulong this_pc)
{
    /*
     * Pass the address of the insn to the exception handler,
     * for recording in the Format $2 (6-word) stack frame.
     * Re-use mmu.ar for the purpose, since that's only valid
     * after tlb_fill.
     */
    tcg_gen_st_i32(tcg_constant_i32(this_pc), cpu_env,
                   offsetof(CPUM68KState, mmu.ar));
    gen_raise_exception(nr);
    /* The helper does not return; end translation here. */
    s->base.is_jmp = DISAS_NORETURN;
}
284
/* Set PC to DEST and raise exception NR; ends the translation block. */
static void gen_exception(DisasContext *s, uint32_t dest, int nr)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);

    gen_raise_exception(nr);

    s->base.is_jmp = DISAS_NORETURN;
}
294
/* Raise an address-error exception at the current instruction. */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
}
299
300 /*
301 * Generate a load from the specified address. Narrow values are
302 * sign extended to full register width.
303 */
304 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
305 int sign, int index)
306 {
307 TCGv tmp = tcg_temp_new_i32();
308
309 switch (opsize) {
310 case OS_BYTE:
311 case OS_WORD:
312 case OS_LONG:
313 tcg_gen_qemu_ld_tl(tmp, addr, index,
314 opsize | (sign ? MO_SIGN : 0) | MO_TE);
315 break;
316 default:
317 g_assert_not_reached();
318 }
319 return tmp;
320 }
321
322 /* Generate a store. */
323 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
324 int index)
325 {
326 switch (opsize) {
327 case OS_BYTE:
328 case OS_WORD:
329 case OS_LONG:
330 tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
331 break;
332 default:
333 g_assert_not_reached();
334 }
335 }
336
/* What an EA access should do with the operand. */
typedef enum {
    EA_STORE,   /* write the value */
    EA_LOADU,   /* read, zero-extended */
    EA_LOADS    /* read, sign-extended */
} ea_what;
342
343 /*
344 * Generate an unsigned load if VAL is 0 a signed load if val is -1,
345 * otherwise generate a store.
346 */
347 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
348 ea_what what, int index)
349 {
350 if (what == EA_STORE) {
351 gen_store(s, opsize, addr, val, index);
352 return store_dummy;
353 } else {
354 return gen_load(s, opsize, addr, what == EA_LOADS, index);
355 }
356 }
357
358 /* Read a 16-bit immediate constant */
359 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
360 {
361 uint16_t im;
362 im = translator_lduw(env, &s->base, s->pc);
363 s->pc += 2;
364 return im;
365 }
366
/*
 * Read an 8-bit immediate constant.  The insn stream is word-granular,
 * so a whole 16-bit word is consumed and truncated to its low byte.
 */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
{
    return read_im16(env, s);
}
372
373 /* Read a 32-bit immediate constant. */
374 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
375 {
376 uint32_t im;
377 im = read_im16(env, s) << 16;
378 im |= 0xffff & read_im16(env, s);
379 return im;
380 }
381
382 /* Read a 64-bit immediate constant. */
383 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
384 {
385 uint64_t im;
386 im = (uint64_t)read_im32(env, s) << 32;
387 im |= (uint64_t)read_im32(env, s);
388 return im;
389 }
390
/*
 * Calculate an address index from extension word EXT: the index
 * register (An/Dn selected by bits 12-15), optionally sign-extended
 * from its low word (bit 11 clear) and scaled by 1/2/4/8 (bits 9-10).
 * TMP is scratch storage; the return value may alias it.
 */
static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        /* W/L bit clear: only the sign-extended low word is used. */
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
409
/*
 * Handle a base + index + displacement effective address.
 * A NULL_QREG base means pc-relative.
 * Returns NULL_QREG if the extension word is not supported by the
 * current CPU feature set.
 */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* Remember the extension word's own address for pc-relative modes. */
    offset = s->pc;
    ext = read_im16(env, s);

    /* Long-word index (bit 11) requires the WORD_INDEX feature. */
    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    /* Without SCALED_INDEX, force the scale field to 1x. */
    if (m68k_feature(s->env, M68K_FEATURE_M68K) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement: 0x20 = word, 0x30 = long */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(s, ext, tmp);
        } else {
            /* index suppressed, or applied post-indirection below */
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold the displacement into the constant */
                base = tcg_constant_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            add = tcg_constant_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
            if ((ext & 0x44) == 4) {
                /* post-index: add the index after the indirection */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement: 2 = word, 3 = long */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format: index + 8-bit displacement */
        tmp = tcg_temp_new();
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            /* pc-relative */
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
516
517 /* Sign or zero extend a value. */
518
519 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
520 {
521 switch (opsize) {
522 case OS_BYTE:
523 if (sign) {
524 tcg_gen_ext8s_i32(res, val);
525 } else {
526 tcg_gen_ext8u_i32(res, val);
527 }
528 break;
529 case OS_WORD:
530 if (sign) {
531 tcg_gen_ext16s_i32(res, val);
532 } else {
533 tcg_gen_ext16u_i32(res, val);
534 }
535 break;
536 case OS_LONG:
537 tcg_gen_mov_i32(res, val);
538 break;
539 default:
540 g_assert_not_reached();
541 }
542 }
543
/*
 * Evaluate all the CC flags: materialise C, V, Z, N in QREG_CC_* from
 * the lazily-recorded operands and leave cc_op at CC_OP_FLAGS.
 */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already in canonical form. */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        /*
         * CC_N holds the result, CC_V the second operand (see
         * gen_update_cc_add); C is recovered from X.
         * cc_op - CC_OP_ADDB yields OS_BYTE/OS_WORD/OS_LONG, relying on
         * the CC_OP_* values being contiguous in size order.
         */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);  /* recover 1st operand */
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);  /* recover 1st operand */
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* CC_N holds the left operand, CC_V the right (gen_update_cc_cmp). */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* Logic ops: Z mirrors the recorded result; C and V are clear. */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* cc_op is only known at runtime; defer to the helper. */
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        gen_helper_flush_flags(cpu_env, tcg_constant_i32(s->cc_op));
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op. */
    s->cc_op = CC_OP_FLAGS;
}
618
619 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
620 {
621 TCGv tmp;
622
623 if (opsize == OS_LONG) {
624 tmp = val;
625 } else {
626 tmp = tcg_temp_new();
627 gen_ext(tmp, val, opsize, sign);
628 }
629
630 return tmp;
631 }
632
/*
 * Record a logic-op result for lazy flags: CC_N gets the sign-extended
 * value; gen_flush_flags() later derives Z from it and clears C/V.
 */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
638
/*
 * Record a compare for lazy flag evaluation: CC_N holds the left
 * operand, CC_V the right; gen_flush_flags() reconstructs the flags.
 */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}
645
/*
 * Record an add/sub for lazy flags: CC_N = sign-extended result,
 * CC_V = second operand.  The caller selects the matching CC_OP_*.
 */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
651
652 static inline int opsize_bytes(int opsize)
653 {
654 switch (opsize) {
655 case OS_BYTE: return 1;
656 case OS_WORD: return 2;
657 case OS_LONG: return 4;
658 case OS_SINGLE: return 4;
659 case OS_DOUBLE: return 8;
660 case OS_EXTENDED: return 12;
661 case OS_PACKED: return 12;
662 default:
663 g_assert_not_reached();
664 }
665 }
666
667 static inline int insn_opsize(int insn)
668 {
669 switch ((insn >> 6) & 3) {
670 case 0: return OS_BYTE;
671 case 1: return OS_WORD;
672 case 2: return OS_LONG;
673 default:
674 g_assert_not_reached();
675 }
676 }
677
678 static inline int ext_opsize(int ext, int pos)
679 {
680 switch ((ext >> pos) & 7) {
681 case 0: return OS_LONG;
682 case 1: return OS_SINGLE;
683 case 2: return OS_EXTENDED;
684 case 3: return OS_PACKED;
685 case 4: return OS_WORD;
686 case 5: return OS_DOUBLE;
687 case 6: return OS_BYTE;
688 default:
689 g_assert_not_reached();
690 }
691 }
692
693 /*
694 * Assign value to a register. If the width is less than the register width
695 * only the low part of the register is set.
696 */
697 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
698 {
699 switch (opsize) {
700 case OS_BYTE:
701 tcg_gen_deposit_i32(reg, reg, val, 0, 8);
702 break;
703 case OS_WORD:
704 tcg_gen_deposit_i32(reg, reg, val, 0, 16);
705 break;
706 case OS_LONG:
707 case OS_SINGLE:
708 tcg_gen_mov_i32(reg, val);
709 break;
710 default:
711 g_assert_not_reached();
712 }
713 }
714
/*
 * Generate code for an "effective address". Does not adjust the base
 * register for autoincrement addressing modes.  Returns NULL_QREG for
 * modes that yield no memory address (register direct, immediate) or
 * that are invalid for OPSIZE.
 */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
    case 1: /* Address register direct. */
        return NULL_QREG;
    case 3: /* Indirect postincrement. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrememnt. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* Byte ops through SP move it by 2, keeping SP word-aligned. */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement. */
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement. */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
            offset = (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 1: /* Absolute long. */
            offset = read_im32(env, s);
            return tcg_constant_i32(offset);
        case 2: /* pc displacement */
            /* Displacement is relative to the extension word's address. */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 3: /* pc index+displacement. */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate. */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
782
783 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
784 int opsize)
785 {
786 int mode = extract32(insn, 3, 3);
787 int reg0 = REG(insn, 0);
788 return gen_lea_mode(env, s, mode, reg0, opsize);
789 }
790
/*
 * Generate code to load/store a value from/into an EA.  WHAT selects
 * the operation: EA_STORE writes VAL; EA_LOADS/EA_LOADU read with
 * sign/zero extension.  For read-modify-write operands ADDRP is
 * non-null and carries the address from the read to the write.
 * Returns NULL_QREG if the addressing mode is invalid.
 */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct. */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement. */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        /* Increment only on the final access of a RMW pair. */
        if (what == EA_STORE || !addrp) {
            TCGv tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68K)) {
                /* Byte ops through SP step by 2, keeping SP word-aligned. */
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            /* Defer the update until the insn completes (do_writebacks). */
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrememnt. */
        if (addrp && what == EA_STORE) {
            /* RMW write-back: reuse the address computed by the read. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            /* Sign extend values for consistency. */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_constant_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
907
908 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
909 int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
910 {
911 int mode = extract32(insn, 3, 3);
912 int reg0 = REG(insn, 0);
913 return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
914 }
915
916 static TCGv_ptr gen_fp_ptr(int freg)
917 {
918 TCGv_ptr fp = tcg_temp_new_ptr();
919 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
920 return fp;
921 }
922
923 static TCGv_ptr gen_fp_result_ptr(void)
924 {
925 TCGv_ptr fp = tcg_temp_new_ptr();
926 tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
927 return fp;
928 }
929
930 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
931 {
932 TCGv t32;
933 TCGv_i64 t64;
934
935 t32 = tcg_temp_new();
936 tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
937 tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
938
939 t64 = tcg_temp_new_i64();
940 tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
941 tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
942 }
943
/*
 * Load a value of size OPSIZE from ADDR into the FP register *FP,
 * converting to the internal format via the ext* helpers.
 * OS_EXTENDED and OS_PACKED raise EXCP_FP_UNIMP where unsupported.
 */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
        gen_helper_extf32(cpu_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
        gen_helper_extf64(cpu_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* First long word: upper 16 bits go to FPReg.l.upper. */
        tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        /* Following 8 bytes form FPReg.l.lower. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
990
/*
 * Store the FP register *FP to ADDR as a value of size OPSIZE,
 * converting from the internal format via the red* helpers.
 * OS_EXTENDED and OS_PACKED raise EXCP_FP_UNIMP where unsupported.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* First long word: FPReg.l.upper in the high 16 bits. */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
        /* Following 8 bytes: FPReg.l.lower. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
1037
1038 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1039 TCGv_ptr fp, ea_what what, int index)
1040 {
1041 if (what == EA_STORE) {
1042 gen_store_fp(s, opsize, addr, fp, index);
1043 } else {
1044 gen_load_fp(s, opsize, addr, fp, index);
1045 }
1046 }
1047
/*
 * Load/store between an effective address and the FP register *FP.
 * Returns 0 on success, -1 if the addressing mode is invalid for the
 * requested operation.
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
        }
        return 0;
    case 1: /* Address register direct. */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement. */
        /* Note: updates the areg global directly, not via delay_set_areg. */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrememnt. */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_constant_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tmp = tcg_constant_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_constant_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                /* High 16 bits of the first long word -> FPReg.l.upper. */
                tmp = tcg_constant_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                t64 = tcg_constant_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                break;
            case OS_PACKED:
                /*
                 * unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1180
1181 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1182 int opsize, TCGv_ptr fp, ea_what what, int index)
1183 {
1184 int mode = extract32(insn, 3, 3);
1185 int reg0 = REG(insn, 0);
1186 return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1187 }
1188
/* A TCG comparison that a condition code resolves into (see gen_cc_cond). */
typedef struct {
    TCGCond tcond;  /* condition to apply between v1 and v2 */
    TCGv v1;
    TCGv v2;
} DisasCompare;
1194
/*
 * Translate m68k condition code COND (0..15) into a DisasCompare.
 *
 * Where possible this reads the lazily-maintained flag state for the
 * current cc_op directly, avoiding a full flag computation; otherwise
 * it calls gen_flush_flags() and tests the canonical CC_OP_FLAGS
 * representation.  Note the final inversion: even-numbered conditions
 * are the negations of the odd ones, handled once at "done".
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly. */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        /* After a compare, CC_N holds the first operand and CC_V the
           second, so most conditions are a direct N-vs-V comparison. */
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* Sign of the result: recompute N - V at the operation's
               width and compare against zero. */
            c->v2 = tcg_constant_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->v2 = tcg_constant_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /*
         * Logic operations clear V, which simplifies LE to (Z || N),
         * and since Z and N are co-located, this becomes a normal
         * comparison vs N.
         */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N. */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_negsetcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition numbers are the inverse of the odd ones. */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1366
/*
 * Emit a conditional branch to L1, taken when condition COND holds.
 * gen_cc_cond must run before update_cc_op: it may flush flags and
 * thereby change s->cc_op.
 */
static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
{
    DisasCompare c;

    gen_cc_cond(&c, s, cond);
    update_cc_op(s);
    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
}
1375
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_exit_tb(DisasContext *s)
{
    update_cc_op(s);
    /* Resume at the insn following the one being translated. */
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->base.is_jmp = DISAS_EXIT;
}
1383
/*
 * Evaluate the source effective address of INSN into RESULT.
 * NB: on an invalid addressing mode this raises an address fault and
 * executes a hidden "return" from the enclosing DISAS_INSN function.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1392
/*
 * Store VAL to the destination effective address of INSN.
 * NB: like SRC_EA, an invalid addressing mode raises an address fault
 * and executes a hidden "return" from the enclosing DISAS_INSN function.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1401
/*
 * Generate a jump to immediate address DEST, chaining to goto_tb slot N
 * when permitted.  SRC is the address of the jump instruction itself,
 * used as the format-2 exception address when single-stepping.
 */
static void gen_jmp_tb(DisasContext *s, int n, target_ulong dest,
                       target_ulong src)
{
    if (unlikely(s->ss_active)) {
        /* Trace exception instead of chaining when single-stepping. */
        update_cc_op(s);
        tcg_gen_movi_i32(QREG_PC, dest);
        gen_raise_exception_format2(s, EXCP_TRACE, src);
    } else if (translator_use_goto_tb(&s->base, dest)) {
        /* Direct chaining to the next TB. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Indirect jump: exit to the main loop for a TB lookup. */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1420
/*
 * Scc <EA>: set the destination byte to 0xff if condition cc holds,
 * else to 0x00 (negsetcond yields -1/0, i.e. all-ones/zero).
 */
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);

    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
}
1435
/*
 * DBcc Dn,<label>: if cc holds, fall through; otherwise decrement the
 * low word of Dn and branch to label unless the counter became -1.
 */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    /* Displacement is relative to the address of the extension word. */
    base = s->pc;
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    /* Decrement only the low 16 bits of Dn. */
    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    gen_partset_reg(OS_WORD, reg, tmp);
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
}
1459
/* Unimplemented MAC instruction: raise a line-A exception. */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1464
/* Unimplemented FPU instruction: raise a line-F exception. */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1469
DISAS_INSN(undef)
{
    /*
     * ??? This is both instructions that are as yet unimplemented
     * for the 680x0 series, as well as those that are implemented
     * but actually illegal for CPU32 or pre-68020.
     */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1481
1482 DISAS_INSN(mulw)
1483 {
1484 TCGv reg;
1485 TCGv tmp;
1486 TCGv src;
1487 int sign;
1488
1489 sign = (insn & 0x100) != 0;
1490 reg = DREG(insn, 9);
1491 tmp = tcg_temp_new();
1492 if (sign)
1493 tcg_gen_ext16s_i32(tmp, reg);
1494 else
1495 tcg_gen_ext16u_i32(tmp, reg);
1496 SRC_EA(env, src, OS_WORD, sign, NULL);
1497 tcg_gen_mul_i32(tmp, tmp, src);
1498 tcg_gen_mov_i32(reg, tmp);
1499 gen_logic_cc(s, tmp, OS_LONG);
1500 }
1501
/*
 * divu.w / divs.w <EA>,Dn: 32/16 -> 16r:16q division, done in a
 * helper so divide-by-zero and overflow exceptions can be raised.
 * ILEN is the insn length, needed for the exception stack frame.
 */
DISAS_INSN(divw)
{
    int sign;
    TCGv src;
    TCGv destr;
    TCGv ilen;

    /* divX.w <EA>,Dn    32/16 -> 16r:16q */

    sign = (insn & 0x100) != 0;

    /* dest.l / src.w */

    SRC_EA(env, src, OS_WORD, sign, NULL);
    destr = tcg_constant_i32(REG(insn, 9));
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsw(cpu_env, destr, src, ilen);
    } else {
        gen_helper_divuw(cpu_env, destr, src, ilen);
    }

    /* The helpers compute the flags; adopt them as-is. */
    set_cc_op(s, CC_OP_FLAGS);
}
1526
/*
 * divu.l / divs.l / divul.l / divsl.l: 32- and 64-bit dividend forms.
 * Extension word: bit 11 = signed, bit 10 = 64-bit dividend (quad),
 * bits 12-14 = quotient register Dq, bits 0-2 = remainder register Dr.
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den, ilen;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        /* 64-bit dividend requires the QUAD_MULDIV feature (68020+). */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_constant_i32(REG(ext, 12));
        reg = tcg_constant_i32(REG(ext, 0));
        ilen = tcg_constant_i32(s->pc - s->base.pc_next);
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den, ilen);
        } else {
            gen_helper_divull(cpu_env, num, reg, den, ilen);
        }
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_constant_i32(REG(ext, 12));
    reg = tcg_constant_i32(REG(ext, 0));
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den, ilen);
    } else {
        gen_helper_divul(cpu_env, num, reg, den, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1573
/*
 * Emit code for a two-digit BCD addition: dest = dest + src + X,
 * using the classic "add 0x66, then subtract 6 from each digit that
 * did not carry" branch-free technique.  dest and src are byte values
 * in 32-bit temps; QREG_CC_X supplies the extend bit.
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*
     * dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /*
     * t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_temp_new();
    tcg_gen_addi_i32(t0, src, 0x066);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /*
     * t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /*
     * extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /*
     * generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    /* multiply the 0x2 mask by 3 to get 0x6 per non-carrying digit */
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);

    /*
     * remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
}
1638
/*
 * Emit code for a two-digit BCD subtraction: dest = dest - src - X,
 * implemented as a BCD addition of the ten's complement
 * (see the identity in the comment below).  QREG_CC_X is the borrow.
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*
     *  dest10 = dest10 - src10 - X
     *         = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /*
     * t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
}
1690
/*
 * Set C/X/Z from a BCD result in VAL: C/X from bit 8 (the decimal
 * carry), and Z accumulated sticky-style (only cleared-by-OR here;
 * callers flush flags first so the old Z survives a zero result).
 */
static void bcd_flags(TCGv val)
{
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1700
/* abcd Dy,Dx: BCD add of two data-register bytes, with extend. */
DISAS_INSN(abcd_reg)
{
    TCGv src;
    TCGv dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
    bcd_add(dest, src);
    /* Only the low byte of the destination register is written. */
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1715
/* abcd -(Ay),-(Ax): BCD add memory-to-memory with pre-decrement. */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    /* Store back to the destination address captured above. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1736
/* sbcd Dy,Dx: BCD subtract of two data-register bytes, with extend. */
DISAS_INSN(sbcd_reg)
{
    TCGv src, dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);

    bcd_sub(dest, src);

    /* Only the low byte of the destination register is written. */
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1752
/* sbcd -(Ay),-(Ax): BCD subtract memory-to-memory with pre-decrement. */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    /* Store back to the destination address captured above. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1773
/* nbcd <EA>: BCD negate (0 - operand - X), read-modify-write. */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    /* Compute 0 - src - X via the generic BCD subtraction. */
    dest = tcg_temp_new();
    tcg_gen_movi_i32(dest, 0);
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);
}
1791
/*
 * add/sub <EA>,Dn and Dn,<EA>: binary add (bit 14 set) or subtract.
 * Bit 8 selects the direction: set means the EA is the destination.
 * X is computed as the unsigned carry/borrow of the operation; note
 * the setcond ordering differs between add (after) and sub (before).
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* <op> Dn,<EA>: memory is the destination operand. */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        /* <op> <EA>,Dn: register is the destination operand. */
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* Carry out: unsigned result < addend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* Borrow: minuend unsigned-less-than subtrahend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
}
1829
/* Reverse the order of the bits in REG (ColdFire bitrev).  */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}
1837
1838 DISAS_INSN(bitop_reg)
1839 {
1840 int opsize;
1841 int op;
1842 TCGv src1;
1843 TCGv src2;
1844 TCGv tmp;
1845 TCGv addr;
1846 TCGv dest;
1847
1848 if ((insn & 0x38) != 0)
1849 opsize = OS_BYTE;
1850 else
1851 opsize = OS_LONG;
1852 op = (insn >> 6) & 3;
1853 SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1854
1855 gen_flush_flags(s);
1856 src2 = tcg_temp_new();
1857 if (opsize == OS_BYTE)
1858 tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1859 else
1860 tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1861
1862 tmp = tcg_temp_new();
1863 tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2);
1864
1865 tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1866
1867 dest = tcg_temp_new();
1868 switch (op) {
1869 case 1: /* bchg */
1870 tcg_gen_xor_i32(dest, src1, tmp);
1871 break;
1872 case 2: /* bclr */
1873 tcg_gen_andc_i32(dest, src1, tmp);
1874 break;
1875 case 3: /* bset */
1876 tcg_gen_or_i32(dest, src1, tmp);
1877 break;
1878 default: /* btst */
1879 break;
1880 }
1881 if (op) {
1882 DEST_EA(env, insn, opsize, dest, &addr);
1883 }
1884 }
1885
/* sats Dn (ColdFire): saturate Dn if the previous op set overflow. */
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
1894
/* Push VAL as a longword onto the stack, pre-decrementing SP by 4. */
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    /* Store first; SP is only updated if the store did not fault. */
    gen_store(s, OS_LONG, tmp, val, IS_USER(s));
    tcg_gen_mov_i32(QREG_SP, tmp);
}
1904
1905 static TCGv mreg(int reg)
1906 {
1907 if (reg < 8) {
1908 /* Dx */
1909 return cpu_dregs[reg];
1910 }
1911 /* Ax */
1912 return cpu_aregs[reg & 7];
1913 }
1914
/*
 * movem: move multiple registers to/from memory.  Bit 10 of the
 * opcode selects load vs store, bit 6 selects long vs word, and the
 * extension word is a register mask.  Post-increment is only legal
 * for loads, pre-decrement only for stores; other restrictions on
 * the addressing mode are enforced via gen_lea_mode.
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /*
         * We want a bare copy of the address reg, without any pre-decrement
         * adjustment, as gen_lea would provide.
         */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_constant_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        /*
         * Two passes: perform all loads before updating any register,
         * so a fault mid-sequence leaves the registers untouched.
         */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /*
                         * M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }
}
2020
/*
 * movep: transfer 2 or 4 bytes between a data register and alternate
 * memory bytes at (An)+d16 (every other byte, big-endian order).
 * Bit 6 selects long vs word, bit 7 selects register-to-memory.
 * NOTE(review): the store path uses tcg_gen_qemu_st_i32 while the
 * load path uses tcg_gen_qemu_ld_tl; on m68k target_long is 32-bit,
 * so these are presumably equivalent — confirm.
 */
DISAS_INSN(movep)
{
    uint8_t i;
    int16_t displ;
    TCGv reg;
    TCGv addr;
    TCGv abuf;
    TCGv dbuf;

    displ = read_im16(env, s);

    addr = AREG(insn, 0);
    reg = DREG(insn, 9);

    abuf = tcg_temp_new();
    tcg_gen_addi_i32(abuf, addr, displ);
    dbuf = tcg_temp_new();

    if (insn & 0x40) {
        i = 4;
    } else {
        i = 2;
    }

    if (insn & 0x80) {
        /* register to memory: emit bytes MSB first. */
        for ( ; i > 0 ; i--) {
            tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
            tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    } else {
        /* memory to register: deposit bytes MSB first. */
        for ( ; i > 0 ; i--) {
            tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
            tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    }
}
2063
2064 DISAS_INSN(bitop_im)
2065 {
2066 int opsize;
2067 int op;
2068 TCGv src1;
2069 uint32_t mask;
2070 int bitnum;
2071 TCGv tmp;
2072 TCGv addr;
2073
2074 if ((insn & 0x38) != 0)
2075 opsize = OS_BYTE;
2076 else
2077 opsize = OS_LONG;
2078 op = (insn >> 6) & 3;
2079
2080 bitnum = read_im16(env, s);
2081 if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
2082 if (bitnum & 0xfe00) {
2083 disas_undef(env, s, insn);
2084 return;
2085 }
2086 } else {
2087 if (bitnum & 0xff00) {
2088 disas_undef(env, s, insn);
2089 return;
2090 }
2091 }
2092
2093 SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2094
2095 gen_flush_flags(s);
2096 if (opsize == OS_BYTE)
2097 bitnum &= 7;
2098 else
2099 bitnum &= 31;
2100 mask = 1 << bitnum;
2101
2102 tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2103
2104 if (op) {
2105 tmp = tcg_temp_new();
2106 switch (op) {
2107 case 1: /* bchg */
2108 tcg_gen_xori_i32(tmp, src1, mask);
2109 break;
2110 case 2: /* bclr */
2111 tcg_gen_andi_i32(tmp, src1, ~mask);
2112 break;
2113 case 3: /* bset */
2114 tcg_gen_ori_i32(tmp, src1, mask);
2115 break;
2116 default: /* btst */
2117 break;
2118 }
2119 DEST_EA(env, insn, opsize, tmp, &addr);
2120 }
2121 }
2122
/*
 * Return a new temp holding the current CCR value, computed by helper
 * from the lazy flag state (which must be synced via update_cc_op).
 */
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    update_cc_op(s);
    dest = tcg_temp_new();
    gen_helper_get_ccr(dest, cpu_env);
    return dest;
}
2132
/*
 * Return a new temp holding the full SR: the supervisor bits from
 * QREG_SR combined with the freshly computed CCR in the low bits.
 */
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);
    return sr;
}
2144
/*
 * Set SR (or only CCR when CCR_ONLY) to the immediate VAL.
 * In the CCR-only case the five flag registers are set directly in
 * their internal encodings (V/N are sign flags stored as 0/-1, Z is
 * stored inverted as 0-means-set).
 */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        /* Must writeback before changing security state. */
        do_writebacks(s);
        gen_helper_set_sr(cpu_env, tcg_constant_i32(val));
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2160
/* Set SR (or only CCR when CCR_ONLY) from the TCG value VAL. */
static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
{
    if (ccr_only) {
        gen_helper_set_ccr(cpu_env, val);
    } else {
        /* Must writeback before changing security state. */
        do_writebacks(s);
        gen_helper_set_sr(cpu_env, val);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2172
/*
 * Common body for "move <EA>,SR" and "move <EA>,CCR".
 * EA field 0x3c is the immediate addressing mode; everything else
 * goes through the generic word-sized source EA evaluation.
 */
static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                           bool ccr_only)
{
    if ((insn & 0x3f) == 0x3c) {
        uint16_t val;
        val = read_im16(env, s);
        gen_set_sr_im(s, val, ccr_only);
    } else {
        TCGv src;
        SRC_EA(env, src, OS_WORD, 0, NULL);
        gen_set_sr(s, src, ccr_only);
    }
}
2186
/*
 * Immediate arithmetic/logic group: ori/andi/subi/addi/eori/cmpi #imm,<EA>.
 * Op field (bits 9-11): 0=ori 1=andi 2=subi 3=addi 5=eori 6=cmpi.
 * EA field 0x3c selects the SR/CCR destination form, which is only
 * legal for the logic ops and requires supervisor mode for SR.
 */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;
    bool with_SR = ((insn & 0x3f) == 0x3c);

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    switch (opsize) {
    case OS_BYTE:
        im = tcg_constant_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_constant_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_constant_i32(read_im32(env, s));
        break;
    default:
        g_assert_not_reached();
    }

    if (with_SR) {
        /* SR/CCR can only be used with andi/eori/ori */
        if (op == 2 || op == 3 || op == 6) {
            disas_undef(env, s, insn);
            return;
        }
        switch (opsize) {
        case OS_BYTE:
            /* Byte size targets CCR. */
            src1 = gen_get_ccr(s);
            break;
        case OS_WORD:
            /* Word size targets SR; supervisor only. */
            if (IS_USER(s)) {
                gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
                return;
            }
            src1 = gen_get_sr(s);
            break;
        default:
            /* OS_LONG; others already g_assert_not_reached.  */
            disas_undef(env, s, insn);
            return;
        }
    } else {
        /* cmpi does not write back, so no address is kept for it. */
        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    }
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 2: /* subi */
        /* Borrow into X: src1 unsigned-less-than the immediate. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        /* Carry into X: unsigned result less than the immediate. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
}
2291
/*
 * cas Dc,Du,<EA>: single compare-and-swap, mapped onto the TCG atomic
 * cmpxchg.  Size field (bits 9-10): 1=byte 2=word 3=long.  Flags are
 * those of the comparison between the loaded value and Dc; Dc is then
 * updated with the loaded value (a no-op when the swap succeeded).
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    MemOp opc;

    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /*
     * if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    /* Apply any deferred post-inc/pre-dec address register update. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2354
/*
 * cas2.w: double word-sized compare-and-swap on two addresses.
 * There is no 2x16-bit host atomic primitive, so under parallel
 * execution we punt to exit_atomic (serialized replay); otherwise a
 * non-atomic helper performs the operation and sets the flags.
 */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(cpu_env);
    } else {
        /* Pack the four register numbers into one operand. */
        TCGv regs = tcg_constant_i32(REG(ext2, 6) |
                                     (REG(ext1, 6) << 3) |
                                     (REG(ext2, 0) << 6) |
                                     (REG(ext1, 0) << 9));
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2404
/*
 * cas2.l: double long-sized compare-and-swap on two addresses.
 * Unlike cas2w, a parallel-capable helper exists (it can use a
 * 64-bit cmpxchg), so both execution modes go through helpers.
 */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one operand. */
    regs = tcg_constant_i32(REG(ext2, 6) |
                            (REG(ext1, 6) << 3) |
                            (REG(ext2, 0) << 6) |
                            (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2454
2455 DISAS_INSN(byterev)
2456 {
2457 TCGv reg;
2458
2459 reg = DREG(insn, 0);
2460 tcg_gen_bswap32_i32(reg, reg);
2461 }
2462
/*
 * move.b/.w/.l <EA>,<EA>: the general data move.  Size is encoded in
 * bits 12-13 (1=byte, 2=long, 3=word).  A destination mode of 1 is
 * movea, which writes an address register and does not touch flags.
 */
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        /* Rebuild an EA word with the destination mode/reg fields
           (they are swapped relative to the source encoding). */
        uint16_t dest_ea;
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src, opsize);
    }
}
2499
/*
 * negx <EA>: negate with extend, i.e. dest = 0 - dest - X.
 * Z is sticky (only cleared, never set, by this insn), hence the
 * flag flush up front and the OR into CC_Z at the end.
 */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) =  -(src + X);
     */

    z = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* The wide subtract leaves the borrow in bit 0 (negated). */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /*
     * Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2542
/*
 * LEA <ea>,An: load the effective address (not the data) into An.
 * A NULL qreg from gen_lea means the EA mode is invalid for lea;
 * raise an address fault in that case.
 */
DISAS_INSN(lea)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 9);
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    tcg_gen_mov_i32(reg, tmp);
}
2556
2557 DISAS_INSN(clr)
2558 {
2559 int opsize;
2560 TCGv zero;
2561
2562 zero = tcg_constant_i32(0);
2563 opsize = insn_opsize(insn);
2564 DEST_EA(env, insn, opsize, zero, NULL);
2565 gen_logic_cc(s, zero, opsize);
2566 }
2567
/* MOVE from CCR: materialize the condition codes and store as a word. */
DISAS_INSN(move_from_ccr)
{
    TCGv ccr;

    ccr = gen_get_ccr(s);
    DEST_EA(env, insn, OS_WORD, ccr, NULL);
}
2575
/*
 * NEG <ea>: two's-complement negate.  Flags are computed as for a
 * subtraction from zero; X/C are set when the result is non-zero.
 */
DISAS_INSN(neg)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_neg_i32(dest, src1);
    set_cc_op(s, CC_OP_SUBB + opsize);
    gen_update_cc_add(dest, src1, opsize);
    /* X (and C, via the SUB cc_op) is set iff the result is non-zero. */
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
    DEST_EA(env, insn, opsize, dest, &addr);
}
2592
/* MOVE to CCR: load CCR from <ea>; ccr_only=true leaves SR bits alone. */
DISAS_INSN(move_to_ccr)
{
    gen_move_to_sr(env, s, insn, true);
}
2597
2598 DISAS_INSN(not)
2599 {
2600 TCGv src1;
2601 TCGv dest;
2602 TCGv addr;
2603 int opsize;
2604
2605 opsize = insn_opsize(insn);
2606 SRC_EA(env, src1, opsize, 1, &addr);
2607 dest = tcg_temp_new();
2608 tcg_gen_not_i32(dest, src1);
2609 DEST_EA(env, insn, opsize, dest, &addr);
2610 gen_logic_cc(s, dest, opsize);
2611 }
2612
2613 DISAS_INSN(swap)
2614 {
2615 TCGv src1;
2616 TCGv src2;
2617 TCGv reg;
2618
2619 src1 = tcg_temp_new();
2620 src2 = tcg_temp_new();
2621 reg = DREG(insn, 0);
2622 tcg_gen_shli_i32(src1, reg, 16);
2623 tcg_gen_shri_i32(src2, reg, 16);
2624 tcg_gen_or_i32(reg, src1, src2);
2625 gen_logic_cc(s, reg, OS_LONG);
2626 }
2627
/*
 * BKPT: on user-mode emulation report a debug exception to the host;
 * on system emulation the instruction traps as illegal.
 */
DISAS_INSN(bkpt)
{
#if defined(CONFIG_USER_ONLY)
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
#else
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
#endif
}
2636
/* PEA <ea>: push the effective address (a long) onto the stack. */
DISAS_INSN(pea)
{
    TCGv tmp;

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    gen_push(s, tmp);
}
2648
2649 DISAS_INSN(ext)
2650 {
2651 int op;
2652 TCGv reg;
2653 TCGv tmp;
2654
2655 reg = DREG(insn, 0);
2656 op = (insn >> 6) & 7;
2657 tmp = tcg_temp_new();
2658 if (op == 3)
2659 tcg_gen_ext16s_i32(tmp, reg);
2660 else
2661 tcg_gen_ext8s_i32(tmp, reg);
2662 if (op == 2)
2663 gen_partset_reg(OS_WORD, reg, tmp);
2664 else
2665 tcg_gen_mov_i32(reg, tmp);
2666 gen_logic_cc(s, tmp, OS_LONG);
2667 }
2668
/* TST <ea>: set NZ from the operand, clear VC; no destination write. */
DISAS_INSN(tst)
{
    int opsize;
    TCGv tmp;

    opsize = insn_opsize(insn);
    SRC_EA(env, tmp, opsize, 1, NULL);
    gen_logic_cc(s, tmp, opsize);
}
2678
/* PULSE (ColdFire): external debug signal; no architectural effect here. */
DISAS_INSN(pulse)
{
    /* Implemented as a NOP. */
}
2683
/* ILLEGAL: unconditionally raise an illegal-instruction exception. */
DISAS_INSN(illegal)
{
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
2688
/*
 * TAS <ea>: test-and-set.  Set NZ from the byte operand, then set its
 * top bit (0x80).  The memory form uses an atomic fetch-or; for the
 * post-inc/pre-dec modes the address register update is done by hand
 * since gen_lea_mode does not commit it.
 */
DISAS_INSN(tas)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);

    if (mode == 0) {
        /* data register direct */
        TCGv dest = cpu_dregs[reg0];
        gen_logic_cc(s, dest, OS_BYTE);
        tcg_gen_ori_tl(dest, dest, 0x80);
    } else {
        TCGv src1, addr;

        addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE);
        if (IS_NULL_QREG(addr)) {
            gen_addr_fault(s);
            return;
        }
        src1 = tcg_temp_new();
        tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80),
                                   IS_USER(s), MO_SB);
        gen_logic_cc(s, src1, OS_BYTE);

        switch (mode) {
        case 3: /* Indirect postincrement. */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 1);
            break;
        case 4: /* Indirect predecrement. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
            break;
        }
    }
}
2722
/*
 * MULS.L/MULU.L (and the 64-bit forms): long multiply.
 * ext bit 11 selects signed, bit 10 selects the 64-bit variant
 * (Dh:Dl result) which requires the QUAD_MULDIV feature.  On 680x0
 * the 32-bit form also computes V (high-part significance); on
 * ColdFire only the logic flags are set.
 */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z reflects the whole 64-bit result being zero. */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
                                   QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
                                   QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /*
         * The upper 32 bits of the product are discarded, so
         * muls.l and mulu.l are functionally equivalent.
         */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2786
/*
 * Common LINK body: push An, set An to the new frame pointer (unless
 * An is SP itself, i.e. (insn & 7) == 7), then add offset to SP.
 */
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
    if ((insn & 7) != 7) {
        /* Don't clobber An when An is the stack pointer itself. */
        tcg_gen_mov_i32(reg, tmp);
    }
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
}
2801
2802 DISAS_INSN(link)
2803 {
2804 int16_t offset;
2805
2806 offset = read_im16(env, s);
2807 gen_link(s, insn, offset);
2808 }
2809
2810 DISAS_INSN(linkl)
2811 {
2812 int32_t offset;
2813
2814 offset = read_im32(env, s);
2815 gen_link(s, insn, offset);
2816 }
2817
/*
 * UNLK An: tear down a stack frame.  An is copied first so the
 * sequence is correct even when An is SP itself: load the saved
 * frame pointer into An, then point SP just past the saved slot.
 */
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    src = tcg_temp_new();
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
}
2831
#if !defined(CONFIG_USER_ONLY)
/*
 * RESET: privileged instruction; in user state raise a privilege
 * violation, otherwise let the helper assert the reset line.
 */
DISAS_INSN(reset)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(cpu_env);
}
#endif
2843
/* NOP: no operation. */
DISAS_INSN(nop)
{
}
2847
/*
 * RTD #disp: pop the return address, then deallocate disp extra
 * bytes of stack before jumping to it.
 */
DISAS_INSN(rtd)
{
    TCGv tmp;
    int16_t offset = read_im16(env, s);

    tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
    gen_jmp(s, tmp);
}
2857
/*
 * RTR: pop a word into the CCR (SR system bits untouched:
 * ccr_only=true), then pop and jump to the return address.
 */
DISAS_INSN(rtr)
{
    TCGv tmp;
    TCGv ccr;
    TCGv sp;

    sp = tcg_temp_new();
    ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(sp, QREG_SP, 2);
    tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, sp, 4);

    gen_set_sr(s, ccr, true);

    gen_jmp(s, tmp);
}
2874
/* RTS: pop the return address from the stack and jump to it. */
DISAS_INSN(rts)
{
    TCGv tmp;

    tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
    gen_jmp(s, tmp);
}
2883
/*
 * JMP/JSR <ea>: jump (or jump-to-subroutine, pushing the return
 * address first) to the effective address.  Bit 6 clear == jsr.
 */
DISAS_INSN(jump)
{
    TCGv tmp;

    /*
     * Load the target address first to ensure correct exception
     * behavior.
     */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    if ((insn & 0x40) == 0) {
        /* jsr */
        gen_push(s, tcg_constant_i32(s->pc));
    }
    gen_jmp(s, tmp);
}
2903
/*
 * ADDQ/SUBQ #imm,<ea>: add or subtract a quick immediate (1..8;
 * field value 0 encodes 8).  Bit 8 selects subtract.  Operations on
 * an address register are always long and never touch the flags.
 * Note the X-flag setcond ordering differs: for subtract the borrow
 * is computed before the subtraction, for add after the addition.
 */
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long. */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;
    }
    val = tcg_constant_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    if ((insn & 0x38) == 0x08) {
        /*
         * Don't update condition codes if the destination is an
         * address register.
         */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        if (insn & 0x0100) {
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    DEST_EA(env, insn, opsize, dest, &addr);
}
2951
/*
 * BRA/BSR/Bcc: pc-relative branch.  The 8-bit displacement in the
 * opcode; value 0 selects a 16-bit extension word, value -1 (0xff)
 * a 32-bit one.  op 0 = bra, op 1 = bsr, op >= 2 = conditional.
 */
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;

    /* Displacements are relative to the PC after the opcode word. */
    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_constant_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc */
        TCGLabel *l1 = gen_new_label();
        /* Branch over the taken path when the condition is false. */
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
    } else {
        /* Unconditional branch. */
        update_cc_op(s);
        gen_jmp_tb(s, 0, base + offset, s->base.pc_next);
    }
}
2983
2984 DISAS_INSN(moveq)
2985 {
2986 tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2987 gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2988 }
2989
2990 DISAS_INSN(mvzs)
2991 {
2992 int opsize;
2993 TCGv src;
2994 TCGv reg;
2995
2996 if (insn & 0x40)
2997 opsize = OS_WORD;
2998 else
2999 opsize = OS_BYTE;
3000 SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3001 reg = DREG(insn, 9);
3002 tcg_gen_mov_i32(reg, src);
3003 gen_logic_cc(s, src, opsize);
3004 }
3005
/*
 * OR: bitwise or between Dn and <ea>.  Bit 8 selects the direction:
 * set means Dn is the source and <ea> the destination, clear means
 * the result goes (partially) into Dn.  Logic flags on the result.
 */
DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 0);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    gen_logic_cc(s, dest, opsize);
}
3028
3029 DISAS_INSN(suba)
3030 {
3031 TCGv src;
3032 TCGv reg;
3033
3034 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3035 reg = AREG(insn, 9);
3036 tcg_gen_sub_i32(reg, reg, src);
3037 }
3038
/*
 * Common SUBX body: dest - src - X with the *X-family flag rules
 * (sticky !Z, X mirrored into C).  The result is left in QREG_CC_N
 * for the caller to write back.
 */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = dest - (src + X);
     */

    zero = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for subtract: (res ^ dest) & (dest ^ src). */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3071
/* SUBX Dy,Dx: register form; result written back as a partial reg. */
DISAS_INSN(subx_reg)
{
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    src = gen_extend(s, DREG(insn, 0), opsize, 1);
    dest = gen_extend(s, DREG(insn, 9), opsize, 1);

    gen_subx(s, src, dest, opsize);

    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
3087
/*
 * SUBX -(Ay),-(Ax): memory form with predecrement on both source
 * and destination address registers.
 */
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_subx(s, src, dest, opsize);

    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3110
3111 DISAS_INSN(mov3q)
3112 {
3113 TCGv src;
3114 int val;
3115
3116 val = (insn >> 9) & 7;
3117 if (val == 0) {
3118 val = -1;
3119 }
3120 src = tcg_constant_i32(val);
3121 gen_logic_cc(s, src, OS_LONG);
3122 DEST_EA(env, insn, OS_LONG, src, NULL);
3123 }
3124
/* CMP <ea>,Dn: compare (Dn - src), setting flags only. */
DISAS_INSN(cmp)
{
    TCGv src;
    TCGv reg;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    gen_update_cc_cmp(s, reg, src, opsize);
}
3136
/*
 * CMPA <ea>,An: compare against an address register.  The word form
 * sign-extends the source, and the comparison itself is always done
 * on the full 32 bits (hence OS_LONG below).
 */
DISAS_INSN(cmpa)
{
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x100) {
        opsize = OS_LONG;
    } else {
        opsize = OS_WORD;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    gen_update_cc_cmp(s, reg, src, OS_LONG);
}
3152
/* CMPM (Ay)+,(Ax)+: compare memory to memory with postincrement. */
DISAS_INSN(cmpm)
{
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay. */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax. */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}
3167
/* EOR Dn,<ea>: exclusive-or Dn into the destination, logic flags. */
DISAS_INSN(eor)
{
    TCGv src;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);

    SRC_EA(env, src, opsize, 0, &addr);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
    gen_logic_cc(s, dest, opsize);
    DEST_EA(env, insn, opsize, dest, &addr);
}
3183
/* Exchange the contents of two 32-bit registers via a temporary. */
static void do_exg(TCGv reg1, TCGv reg2)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_mov_i32(temp, reg1);
    tcg_gen_mov_i32(reg1, reg2);
    tcg_gen_mov_i32(reg2, temp);
}
3191
/* EXG Dx,Dy: exchange two data registers. */
DISAS_INSN(exg_dd)
{
    /* exchange Dx and Dy */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}
3197
/* EXG Ax,Ay: exchange two address registers. */
DISAS_INSN(exg_aa)
{
    /* exchange Ax and Ay */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}
3203
/* EXG Dx,Ay: exchange a data register with an address register. */
DISAS_INSN(exg_da)
{
    /* exchange Dx and Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
3209
/*
 * AND: bitwise and between Dn and <ea>.  Bit 8 set: Dn is the source
 * and the result is stored to <ea>; clear: result goes into Dn as a
 * partial register write.  Logic flags on the result.
 */
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
}
3233
3234 DISAS_INSN(adda)
3235 {
3236 TCGv src;
3237 TCGv reg;
3238
3239 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3240 reg = AREG(insn, 9);
3241 tcg_gen_add_i32(reg, reg, src);
3242 }
3243
/*
 * Common ADDX body: src + dest + X with the *X-family flag rules
 * (sticky !Z, X mirrored into C).  The result is left in QREG_CC_N
 * for the caller to write back.
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    zero = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition: (res ^ src) & ~(dest ^ src). */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3275
/* ADDX Dy,Dx: register form; result written back as a partial reg. */
DISAS_INSN(addx_reg)
{
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    dest = gen_extend(s, DREG(insn, 9), opsize, 1);
    src = gen_extend(s, DREG(insn, 0), opsize, 1);

    gen_addx(s, src, dest, opsize);

    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
3291
/*
 * ADDX -(Ay),-(Ax): memory form with predecrement on both source
 * and destination address registers.
 */
DISAS_INSN(addx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3314
/*
 * Immediate-count shift on a data register (ASL/ASR/LSL/LSR with a
 * 1..8 count; field value 0 encodes 8).  Bit 3 selects logical vs.
 * arithmetic, bit 8 selects left.  Flags (NZVC and X) are computed
 * inline and the cc_op is left as CC_OP_FLAGS.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C is the last bit shifted out of the top. */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /*
         * Note that ColdFire always clears V (done above),
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation.
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
            }
        }
    } else {
        /* C is the last bit shifted out of the bottom. */
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3365
/*
 * Register-count shift on a data register.  The count is taken
 * modulo 64 (architectural), so the value is widened to 64 bits and
 * shifted there — this also makes the last bit shifted out (the
 * carry) directly accessible.  X is only updated for a non-zero
 * count; V tracks any sign-bit change for arithmetic left shifts on
 * 680x0.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /*
     * Note that m68k truncates the shift count modulo 64, not 32.
     * In addition, a 64-bit shift makes it easy to find "the last
     * bit shifted out", for the carry flag.
     */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0. Also used as a zero source below. */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free. */
        } else {
            TCGv zero = tcg_constant_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero. */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /*
         * M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            TCGv_i64 tt = tcg_constant_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            /* Sign extend the input to 64 bits; re-do the shift. */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged. */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit. */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow. */
            tcg_gen_negsetcond_i64(TCG_COND_NE, t64, t64, tcg_constant_i64(0));
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
        }
    } else {
        /* Shift the value into the high half so the carry falls out low. */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free. */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero. */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    /* Write back the result. */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3456
/* Byte-sized immediate-count shift. */
DISAS_INSN(shift8_im)
{
    shift_im(s, insn, OS_BYTE);
}
3461
/* Word-sized immediate-count shift. */
DISAS_INSN(shift16_im)
{
    shift_im(s, insn, OS_WORD);
}
3466
/* Long-sized immediate-count shift. */
DISAS_INSN(shift_im)
{
    shift_im(s, insn, OS_LONG);
}
3471
/* Byte-sized register-count shift. */
DISAS_INSN(shift8_reg)
{
    shift_reg(s, insn, OS_BYTE);
}
3476
/* Word-sized register-count shift. */
DISAS_INSN(shift16_reg)
{
    shift_reg(s, insn, OS_WORD);
}
3481
/* Long-sized register-count shift. */
DISAS_INSN(shift_reg)
{
    shift_reg(s, insn, OS_LONG);
}
3486
/*
 * Memory shift: shift a word in memory by exactly one bit position.
 * Bit 3 selects logical vs. arithmetic, bit 8 selects left.
 */
DISAS_INSN(shift_mem)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C is the bit shifted out of the top of the word. */
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /*
         * Note that ColdFire always clears V,
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            src = gen_extend(s, src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* C is the bit shifted out of the bottom (masked below). */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3526
/*
 * Rotate (ROL/ROR) a value of the given bit width in place and
 * compute flags.  Sub-word sizes are replicated across 32 bits so a
 * single 32-bit rotate is correct, then sign-extended back.  C is
 * the last bit rotated out; X is not affected; V is always clear.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works. */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works. */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
3574
/*
 * Set flags after a rotate-through-X (ROXL/ROXR): sign-extend the
 * result to the operand size, set NZ from it, and copy the rotated-
 * out bit X into both X and C.  V is always clear.
 */
static void rotate_x_flags(TCGv reg, TCGv X, int size)
{
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }
    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);
    tcg_gen_mov_i32(QREG_CC_X, X);
    tcg_gen_mov_i32(QREG_CC_C, X);
    tcg_gen_movi_i32(QREG_CC_V, 0);
}
3593
/*
 * Rotate through the X bit for sub-long sizes, expressed as a pair
 * of opposing shifts plus the insertion of X.  Returns the new X
 * (bit 'size' of the composed value).
 * Result of rotate_x() is valid if 0 <= shift <= size.
 */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_constant_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift);  /* shx = size - shift */
    }

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_extract_i32(X, reg, size, 1);

    return X;
}
3634
/*
 * 32-bit rotate through the X bit, done as a 33-bit rotation built
 * from a 64-bit rotate of [reg:X] (left) or [X:reg] (right).  The
 * register and X are left unchanged when the count is zero.  Returns
 * the new X value.
 * Result of rotate32_x() is valid if 0 <= shift < 33.
 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    tcg_gen_or_i32(lo, lo, hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_constant_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);

    return X;
}
3695
/*
 * Long rotate by immediate count (1..8; field value 0 encodes 8).
 * Bit 3 set selects plain rotate, clear selects rotate through X.
 */
DISAS_INSN(rotate_im)
{
    TCGv shift;
    int tmp;
    int left = (insn & 0x100);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        rotate(DREG(insn, 0), shift, left, 32);
    } else {
        TCGv X = rotate32_x(DREG(insn, 0), shift, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
3717
/* Byte rotate by immediate count; see rotate_im for the encoding. */
DISAS_INSN(rotate8_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 8);
    } else {
        TCGv X = rotate_x(reg, shift, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3742
/* Word rotate by immediate count; see rotate_im for the encoding. */
DISAS_INSN(rotate16_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 16);
    } else {
        TCGv X = rotate_x(reg, shift, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3766
/*
 * Long rotate by register count.  Plain rotates use the count mod 32;
 * rotates through X use it mod 33 (the 33-bit [X:reg] width).  A zero
 * count still clears C for plain rotates, via the movcond.
 */
DISAS_INSN(rotate_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
3797
/* Byte rotate by register count; count mod 8, or mod 9 through X. */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3829
/*
 * ROL.W/ROR.W/ROXL.W/ROXR.W Dx,Dy: rotate the low word of a data
 * register by a count taken from another data register.
 */
DISAS_INSN(rotate16_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);  /* bit 8 set: rotate left */

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* Plain rotate: only the low 4 bits matter for a 16-bit value. */
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* Rotate through X: the 17-bit quantity (X:word) rotates mod 17. */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    /* Write back only the low word of Dn. */
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3861
/*
 * ROL/ROR/ROXL/ROXR <ea>: rotate a word in memory by exactly one bit
 * (the memory forms of these insns always rotate by 1).
 */
DISAS_INSN(rotate_mem)
{
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);  /* bit 8 set: rotate left */

    SRC_EA(env, src, OS_WORD, 0, &addr);

    /* Memory rotates are always by a single bit. */
    shift = tcg_constant_i32(1);
    if (insn & 0x0200) {
        /* Plain rotate. */
        rotate(src, shift, left, 16);
    } else {
        /* Rotate through the X flag. */
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
    }
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3881
/*
 * BFEXTU/BFEXTS Dn{offset:width},Dm: extract a bitfield from a data
 * register into another, zero- or sign-extended.  Offset and width
 * may each be immediate or come from a register (ext word bits 11/5).
 */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;  /* BFEXTS vs BFEXTU */
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    /* Width field 0 encodes 32. */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /*
     * In general, we're going to rotate the field so that it's at the
     * top of the word and then right-shift by the complement of the
     * width to extend the field.
     */
    if (ext & 0x20) {
        /* Variable width. */
        if (ext & 0x800) {
            /* Variable offset. */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = 32 - width (mod 32); width 0 encodes 32 -> shift 0. */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        /* CC_N gets the sign-extended field for the flags. */
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
    } else {
        /* Immediate width. */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /*
             * Immediate offset.  If the field doesn't wrap around the
             * end of the word, rely on (s)extract completely.
             */
            if (pos < 0) {
                /* Field wraps: rotate it into place first. */
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    set_cc_op(s, CC_OP_LOGIC);
}
3948
/*
 * BFEXTU/BFEXTS <ea>{offset:width},Dn: extract a bitfield from memory
 * via helper (the field may straddle byte/word boundaries).
 */
DISAS_INSN(bfext_mem)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;  /* BFEXTS vs BFEXTU */
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width: register (ext bit 5) or 5-bit immediate. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    /* Offset: register (ext bit 11) or 5-bit immediate. */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        /* Helper returns result:flags packed in 64 bits. */
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);
}
3983
/*
 * BFCHG/BFCLR/BFSET/BFTST/BFFFO Dn{offset:width}: bitfield ops on a
 * data register.  Builds an inverted mask of the field (rotated into
 * position) plus the shifted field in CC_N for the flags, then applies
 * the operation selected by insn bits 8-11.
 */
DISAS_INSN(bfop_reg)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    /* Width field 0 encodes 32. */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs = NULL, tlen = NULL;
    /* BFFFO additionally needs the offset/width values for its helper. */
    bool is_bfffo = (insn & 0x0f00) == 0x0d00;

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset. */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* Field wraps around bit 0; rotate instead of shift. */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);

        mask = tcg_constant_i32(ror32(maski, ofs));
        if (is_bfffo) {
            tofs = tcg_constant_i32(ofs);
            tlen = tcg_constant_i32(len);
        }
    } else {
        TCGv tmp = tcg_temp_new();

        mask = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp);
            if (is_bfffo) {
                tlen = tcg_temp_new();
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1));
            if (is_bfffo) {
                tlen = tcg_constant_i32(len);
            }
        }

        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (is_bfffo) {
                tofs = tmp;
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (is_bfffo) {
                tofs = tcg_constant_i32(ofs);
            }
        }
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* At this point 'mask' has 0s in the field and 1s elsewhere. */
    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do. */
        break;
    default:
        g_assert_not_reached();
    }
}
4070
/*
 * BFCHG/BFCLR/BFSET/BFTST/BFFFO <ea>{offset:width}: bitfield ops on
 * memory, dispatched to per-op helpers that also compute the flags.
 */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width: register (ext bit 5) or 5-bit immediate. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    /* Offset: register (ext bit 11) or 5-bit immediate. */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        /* Helper returns ffo-result:flags packed in 64 bits. */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);
}
4117
/*
 * BFINS Dn,Dm{offset:width}: insert the low 'width' bits of Dn into
 * a bitfield of Dm.  Flags are computed from the inserted value
 * shifted to the top of the word.
 */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    /* Width field 0 encodes 32. */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit. */
        if (pos >= 0) {
            /* Field does not wrap: a plain deposit suffices. */
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Field wraps around bit 0: mask, rotate into place, merge. */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            /* mask = -2 << (width - 1): 1s above the field. */
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        /* Rotate mask and field to the final position and merge. */
        tcg_gen_andi_i32(rot, rot, 31);
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);
    }
}
4187
/*
 * BFINS Dn,<ea>{offset:width}: insert a bitfield into memory via
 * helper (the field may straddle byte/word boundaries).
 */
DISAS_INSN(bfins_mem)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width: register (ext bit 5) or 5-bit immediate. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    /* Offset: register (ext bit 11) or 5-bit immediate. */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
    set_cc_op(s, CC_OP_LOGIC);
}
4214
/*
 * FF1 Dn (ColdFire): find first one.  Flags are set from the value
 * *before* the search, per the insn definition.
 */
DISAS_INSN(ff1)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_logic_cc(s, reg, OS_LONG);
    gen_helper_ff1(reg, reg);
}
4222
/*
 * CHK <ea>,Dn: trap if Dn < 0 or Dn > <ea>.  Word size always;
 * long size only on CPUs with the CHK2 feature.
 */
DISAS_INSN(chk)
{
    TCGv src, reg;
    int opsize;

    switch ((insn >> 7) & 3) {
    case 3:
        opsize = OS_WORD;
        break;
    case 2:
        if (m68k_feature(env, M68K_FEATURE_CHK2)) {
            opsize = OS_LONG;
            break;
        }
        /* fallthru */
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);

    /* The helper reads live flags, so materialize them first. */
    gen_flush_flags(s);
    gen_helper_chk(cpu_env, reg, src);
}
4248
/*
 * CHK2 <ea>,Rn: load a lower and upper bound from consecutive memory
 * locations at <ea> and trap (via helper) if Rn is out of range.
 */
DISAS_INSN(chk2)
{
    uint16_t ext;
    TCGv addr1, addr2, bound1, bound2, reg;
    int opsize;

    switch ((insn >> 9) & 3) {
    case 0:
        opsize = OS_BYTE;
        break;
    case 1:
        opsize = OS_WORD;
        break;
    case 2:
        opsize = OS_LONG;
        break;
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    /* Ext word bit 11 must be set for CHK2 (clear encodes CMP2 here). */
    ext = read_im16(env, s);
    if ((ext & 0x0800) == 0) {
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    /*
     * NOTE(review): unlike the bitfield handlers, the gen_lea result is
     * not checked with IS_NULL_QREG here — presumably this insn is only
     * registered with valid control addressing modes; verify.
     */
    addr1 = gen_lea(env, s, insn, OS_UNSIZED);
    addr2 = tcg_temp_new();
    tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));

    /* bound1 = lower bound, bound2 = upper bound, both sign-extended. */
    bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
    bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));

    reg = tcg_temp_new();
    if (ext & 0x8000) {
        /* Address register: compared at full 32 bits. */
        tcg_gen_mov_i32(reg, AREG(ext, 12));
    } else {
        /* Data register: sign-extend to the operand size. */
        gen_ext(reg, DREG(ext, 12), opsize, 1);
    }

    /* The helper reads live flags, so materialize them first. */
    gen_flush_flags(s);
    gen_helper_chk2(cpu_env, reg, bound1, bound2);
}
4293
/*
 * Copy one 16-byte line from *src to *dst for MOVE16.  Both addresses
 * are rounded down to a 16-byte boundary; 'index' is the MMU index.
 * Both loads are performed before either store.
 */
static void m68k_copy_line(TCGv dst, TCGv src, int index)
{
    TCGv addr;
    TCGv_i64 t0, t1;

    addr = tcg_temp_new();

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* Load the full line (two 8-byte halves) from the aligned source. */
    tcg_gen_andi_i32(addr, src, ~15);
    tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);

    /* Store it to the aligned destination. */
    tcg_gen_andi_i32(addr, dst, ~15);
    tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
}
4314
4315 DISAS_INSN(move16_reg)
4316 {
4317 int index = IS_USER(s);
4318 TCGv tmp;
4319 uint16_t ext;
4320
4321 ext = read_im16(env, s);
4322 if ((ext & (1 << 15)) == 0) {
4323 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4324 }
4325
4326 m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4327
4328 /* Ax can be Ay, so save Ay before incrementing Ax */
4329 tmp = tcg_temp_new();
4330 tcg_gen_mov_i32(tmp, AREG(ext, 12));
4331 tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4332 tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4333 }
4334
/*
 * MOVE16 between (Ay)/(Ay)+ and an absolute long address, in either
 * direction; insn bit 3 selects direction, bit 4 selects whether the
 * address register is post-incremented.
 */
DISAS_INSN(move16_mem)
{
    int index = IS_USER(s);
    TCGv reg, addr;

    reg = AREG(insn, 0);
    addr = tcg_constant_i32(read_im32(env, s));

    if ((insn >> 3) & 1) {
        /* MOVE16 (xxx).L, (Ay) */
        m68k_copy_line(reg, addr, index);
    } else {
        /* MOVE16 (Ay), (xxx).L */
        m68k_copy_line(addr, reg, index);
    }

    if (((insn >> 3) & 2) == 0) {
        /* (Ay)+ */
        tcg_gen_addi_i32(reg, reg, 16);
    }
}
4356
/*
 * STRLDSR (ColdFire): the two-word sequence "strldsr #imm" pushes the
 * current SR and loads a new one.  The second opcode word must be the
 * fixed pattern 0x46FC (move-to-SR), and the new SR must keep the
 * supervisor bit set.
 */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    /* PC of the insn itself, for exception reporting. */
    addr = s->pc - 2;
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_ILLEGAL);
        return;
    }
    ext = read_im16(env, s);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
    /* SR changed: end the TB so the new flags take effect. */
    gen_exit_tb(s);
}
4377
/*
 * MOVE SR,<ea>.  Privileged on 68010+ (MOVEFROMSR_PRIV feature);
 * unprivileged on the 68000 and ColdFire.
 */
DISAS_INSN(move_from_sr)
{
    TCGv sr;

    if (IS_USER(s) && m68k_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    DEST_EA(env, insn, OS_WORD, sr, NULL);
}
4389
4390 #if !defined(CONFIG_USER_ONLY)
/*
 * MOVES <ea>,Rn / MOVES Rn,<ea>: privileged move using the alternate
 * function codes (SFC for loads, DFC for stores).  Ext word bit 11
 * selects the direction, bit 15 selects address vs data register.
 */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            /* Address register: sign-extend to 32 bits. */
            gen_ext(reg, tmp, opsize, 1);
        } else {
            /* Data register: write only the low part. */
            gen_partset_reg(opsize, reg, tmp);
        }
    }
    /* gen_lea did not update An for (An)+ / -(An); do it here. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement. */
        /* Byte accesses through A7 keep the stack word-aligned. */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement. */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
4448
/* MOVE <ea>,SR: privileged; ends the TB since the SR change may
 * affect interrupts and translation state. */
DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_move_to_sr(env, s, insn, false);
    gen_exit_tb(s);
}
4458
/* MOVE USP,An: read the user stack pointer (privileged). */
DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4468
/* MOVE An,USP: write the user stack pointer (privileged). */
DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4478
/* HALT (ColdFire): privileged; raised as an internal halt exception
 * with the PC of the *next* insn so execution resumes after it. */
DISAS_INSN(halt)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
4488
/*
 * STOP #imm: privileged.  Load SR from the immediate, mark the CPU
 * halted, and leave the TB via EXCP_HLT so the CPU waits for an
 * interrupt; PC is set past the insn so execution resumes after it.
 */
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
4504
/* RTE: privileged; the actual frame unwinding is done in the
 * exception path (EXCP_RTE), not inline in the TB. */
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->base.pc_next, EXCP_RTE);
}
4513
/*
 * MOVEC Rn,<creg> (ColdFire form, register-to-control only).
 * Privileged.  Ext word: bit 15 = A vs D register, bits 0-11 select
 * the control register.  Ends the TB since a control register change
 * can affect translation.
 */
DISAS_INSN(cf_movec)
{
    uint16_t ext;
    TCGv reg;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    gen_helper_cf_movec_to(cpu_env, tcg_constant_i32(ext & 0xfff), reg);
    gen_exit_tb(s);
}
4534
/*
 * MOVEC (68010+ form): move to or from a control register; insn
 * bit 0 selects the direction (1 = to control register).  Privileged;
 * ends the TB since a control register change can affect translation.
 */
DISAS_INSN(m68k_movec)
{
    uint16_t ext;
    TCGv reg, creg;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    creg = tcg_constant_i32(ext & 0xfff);
    if (insn & 1) {
        gen_helper_m68k_movec_to(cpu_env, creg, reg);
    } else {
        gen_helper_m68k_movec_from(reg, cpu_env, creg);
    }
    gen_exit_tb(s);
}
4560
/* INTOUCH (ColdFire): privileged instruction-cache touch. */
DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op. */
}
4569
/* CPUSHL (ColdFire): privileged cache line push/invalidate. */
DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op. */
}
4578
/* CPUSH (68040+): privileged cache push/invalidate. */
DISAS_INSN(cpush)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op. */
}
4587
/* CINV (68040+): privileged cache line invalidate. */
DISAS_INSN(cinv)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Invalidate cache line.  Implement as no-op. */
}
4596
4597 #if !defined(CONFIG_USER_ONLY)
/* PFLUSH: privileged TLB flush; opmode (insn bits 3-4) selects the
 * flush variant, which the helper interprets. */
DISAS_INSN(pflush)
{
    TCGv opmode;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    opmode = tcg_constant_i32((insn >> 3) & 3);
    gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
}
4610
/* PTEST: privileged MMU address test; insn bit 5 selects whether a
 * read or write access is probed. */
DISAS_INSN(ptest)
{
    TCGv is_read;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    is_read = tcg_constant_i32((insn >> 5) & 1);
    gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
}
4622 #endif
4623
/* WDDATA (ColdFire debug): not implemented; always raise a
 * privilege violation. */
DISAS_INSN(wddata)
{
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
4628
/* WDEBUG (ColdFire debug): privileged and unimplemented — aborts
 * emulation if actually executed in supervisor mode. */
DISAS_INSN(wdebug)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(env_cpu(env), "WDEBUG not implemented");
}
4638 #endif
4639
/* TRAP #n: raise EXCP_TRAP0+n with the PC past the insn so RTE
 * returns to the following instruction. */
DISAS_INSN(trap)
{
    gen_exception(s, s->pc, EXCP_TRAP0 + (insn & 0xf));
}
4644
/*
 * Common tail for TRAPcc/TRAPV: raise EXCP_TRAPCC when the prepared
 * condition holds.  Nothing is generated for a never-true condition;
 * an always-true condition raises unconditionally; otherwise a branch
 * skips the exception when the condition is false.
 */
static void do_trapcc(DisasContext *s, DisasCompare *c)
{
    if (c->tcond != TCG_COND_NEVER) {
        TCGLabel *over = NULL;

        update_cc_op(s);

        if (c->tcond != TCG_COND_ALWAYS) {
            /* Jump over if !c.  */
            over = gen_new_label();
            tcg_gen_brcond_i32(tcg_invert_cond(c->tcond), c->v1, c->v2, over);
        }

        /* Format-2 exception frame: PC of the insn, next-PC in frame. */
        tcg_gen_movi_i32(QREG_PC, s->pc);
        gen_raise_exception_format2(s, EXCP_TRAPCC, s->base.pc_next);

        if (over != NULL) {
            /* Fall-through path: translation continues normally. */
            gen_set_label(over);
            s->base.is_jmp = DISAS_NEXT;
        }
    }
}
4667
/*
 * TRAPcc[.W/.L] [#imm]: trap when condition cc holds.  The optional
 * immediate operand is part of the encoding but has no architectural
 * effect, so it is consumed and discarded.
 */
DISAS_INSN(trapcc)
{
    DisasCompare c;

    /* Consume and discard the immediate operand.  */
    switch (extract32(insn, 0, 3)) {
    case 2: /* trapcc.w */
        (void)read_im16(env, s);
        break;
    case 3: /* trapcc.l */
        (void)read_im32(env, s);
        break;
    case 4: /* trapcc (no operand) */
        break;
    default:
        /* trapcc registered with only valid opmodes */
        g_assert_not_reached();
    }

    gen_cc_cond(&c, s, extract32(insn, 8, 4));
    do_trapcc(s, &c);
}
4690
/* TRAPV: trap on overflow — same as TRAPcc with condition VS (9). */
DISAS_INSN(trapv)
{
    DisasCompare c;

    gen_cc_cond(&c, s, 9); /* V set */
    do_trapcc(s, &c);
}
4698
/*
 * Read an FPU control register (M68K_FPIAR/FPSR/FPCR) into 'res'.
 * FPIAR is not modelled and always reads as zero.
 */
static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        tcg_gen_movi_i32(res, 0);
        break;
    case M68K_FPSR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
        break;
    }
}
4713
/*
 * Write 'val' to an FPU control register.  FPIAR writes are ignored;
 * FPCR goes through a helper so rounding/precision state is updated.
 */
static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        break;
    case M68K_FPSR:
        tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        gen_helper_set_fpcr(cpu_env, val);
        break;
    }
}
4727
/* Store an FPU control register to memory at 'addr' (32-bit). */
static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
{
    int index = IS_USER(s);
    TCGv tmp;

    tmp = tcg_temp_new();
    gen_load_fcr(s, tmp, reg);
    tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
}
4737
/* Load an FPU control register from memory at 'addr' (32-bit). */
static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
{
    int index = IS_USER(s);
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
    gen_store_fcr(s, tmp, reg);
}
4747
4748
/*
 * FMOVE/FMOVEM to/from the FPU control registers (FPCR/FPSR/FPIAR).
 * 'mask' (ext bits 10-12) is a one-hot-or-more selector; 'is_write'
 * (ext bit 13) means register-to-EA.  Register and immediate EA modes
 * allow only a single control register; memory modes transfer every
 * selected register, in order, 4 bytes each.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* Exactly one control register may be selected. */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    case 7: /* Immediate */
        if (REG(insn, 0) == 4) {
            /* Immediate source: load-direction only, single register. */
            if (is_write ||
                (mask != M68K_FPIAR && mask != M68K_FPSR &&
                 mask != M68K_FPCR)) {
                gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
                return;
            }
            tmp = tcg_constant_i32(read_im32(env, s));
            gen_store_fcr(s, tmp, mask);
            return;
        }
        break;
    default:
        break;
    }

    /* Memory EA: transfer each selected register in sequence. */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /*
     * mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* -(An): store registers at descending addresses, FPIAR first. */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        /* Ascending addresses, FPCR first; mode 3 is (An)+. */
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        if (mode == 3) {
            /* Post-increment: write back the final address. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
4844
/*
 * FMOVEM: move multiple FP data registers to/from memory via helper.
 * Ext bit 13 selects the direction (clear = load), ext bits 11-12 the
 * mode (dynamic vs static register list, pre-decrement vs
 * post-increment ordering).  On CPUs with a real FPU the registers
 * are transferred in 96-bit extended format; otherwise as doubles
 * (marked FIXME in the original).
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* tmp holds the 8-bit register mask, then the final address. */
    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /*
         * predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
            }
        }
    }
    /* (An)+ and -(An): write the updated address back to An. */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
}
4904
4905 /*
4906 * ??? FP exceptions are not implemented. Most exceptions are deferred until
4907 * immediately before the next FP instruction is executed.
4908 */
4909 DISAS_INSN(fpu)
4910 {
4911 uint16_t ext;
4912 int opmode;
4913 int opsize;
4914 TCGv_ptr cpu_src, cpu_dest;
4915
4916 ext = read_im16(env, s);
4917 opmode = ext & 0x7f;
4918 switch ((ext >> 13) & 7) {
4919 case 0:
4920 break;
4921 case 1:
4922 goto undef;
4923 case 2:
4924 if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
4925 /* fmovecr */
4926 TCGv rom_offset = tcg_constant_i32(opmode);
4927 cpu_dest = gen_fp_ptr(REG(ext, 7));
4928 gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
4929 return;
4930 }
4931 break;
4932 case 3: /* fmove out */
4933 cpu_src = gen_fp_ptr(REG(ext, 7));
4934 opsize = ext_opsize(ext, 10);
4935 if (gen_ea_fp(env, s, insn, opsize, cpu_src,
4936 EA_STORE, IS_USER(s)) == -1) {
4937 gen_addr_fault(s);
4938 }
4939 gen_helper_ftst(cpu_env, cpu_src);
4940 return;
4941 case 4: /* fmove to control register. */
4942 case 5: /* fmove from control register. */
4943 gen_op_fmove_fcr(env, s, insn, ext);
4944 return;
4945 case 6: /* fmovem */
4946 case 7:
4947 if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
4948 goto undef;
4949 }
4950 gen_op_fmovem(env, s, insn, ext);
4951 return;
4952 }
4953 if (ext & (1 << 14)) {
4954 /* Source effective address. */
4955 opsize = ext_opsize(ext, 10);
4956 cpu_src = gen_fp_result_ptr();
4957 if (gen_ea_fp(env, s, insn, opsize, cpu_src,
4958 EA_LOADS, IS_USER(s)) == -1) {
4959 gen_addr_fault(s);
4960 return;
4961 }
4962 } else {
4963 /* Source register. */
4964 opsize = OS_EXTENDED;
4965 cpu_src = gen_fp_ptr(REG(ext, 10));
4966 }
4967 cpu_dest = gen_fp_ptr(REG(ext, 7));
4968 switch (opmode) {
4969 case 0: /* fmove */
4970 gen_fp_move(cpu_dest, cpu_src);
4971 break;
4972 case 0x40: /* fsmove */
4973 gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
4974 break;
4975 case 0x44: /* fdmove */
4976 gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
4977 break;
4978 case 1: /* fint */
4979 gen_helper_firound(cpu_env, cpu_dest, cpu_src);
4980 break;
4981 case 2: /* fsinh */
4982 gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
4983 break;
4984 case 3: /* fintrz */
4985 gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
4986 break;
4987 case 4: /* fsqrt */
4988 gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
4989 break;
4990 case 0x41: /* fssqrt */
4991 gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
4992 break;
4993 case 0x45: /* fdsqrt */
4994 gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
4995 break;
4996 case 0x06: /* flognp1 */
4997 gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
4998 break;
4999 case 0x08: /* fetoxm1 */
5000 gen_helper_fetoxm1(cpu_env, cpu_dest, cpu_src);
5001 break;
5002 case 0x09: /* ftanh */
5003 gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
5004 break;
5005 case 0x0a: /* fatan */
5006 gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
5007 break;
5008 case 0x0c: /* fasin */
5009 gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
5010 break;
5011 case 0x0d: /* fatanh */
5012 gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
5013 break;
5014 case 0x0e: /* fsin */
5015 gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
5016 break;
5017 case 0x0f: /* ftan */
5018 gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
5019 break;
5020 case 0x10: /* fetox */
5021 gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
5022 break;
5023 case 0x11: /* ftwotox */
5024 gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
5025 break;
5026 case 0x12: /* ftentox */
5027 gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
5028 break;
5029 case 0x14: /* flogn */
5030 gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
5031 break;
5032 case 0x15: /* flog10 */
5033 gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
5034 break;
5035 case 0x16: /* flog2 */
5036 gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
5037 break;
5038 case 0x18: /* fabs */
5039 gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
5040 break;
5041 case 0x58: /* fsabs */
5042 gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
5043 break;
5044 case 0x5c: /* fdabs */
5045 gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
5046 break;
5047 case 0x19: /* fcosh */
5048 gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
5049 break;
5050 case 0x1a: /* fneg */
5051 gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
5052 break;
5053 case 0x5a: /* fsneg */
5054 gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
5055 break;
5056 case 0x5e: /* fdneg */
5057 gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
5058 break;
5059 case 0x1c: /* facos */
5060 gen_helper_facos(cpu_env, cpu_dest, cpu_src);
5061 break;
5062 case 0x1d: /* fcos */
5063 gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
5064 break;
5065 case 0x1e: /* fgetexp */
5066 gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
5067 break;
5068 case 0x1f: /* fgetman */
5069 gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
5070 break;
5071 case 0x20: /* fdiv */
5072 gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5073 break;
5074 case 0x60: /* fsdiv */
5075 gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5076 break;
5077 case 0x64: /* fddiv */
5078 gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5079 break;
5080 case 0x21: /* fmod */
5081 gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
5082 break;
5083 case 0x22: /* fadd */
5084 gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5085 break;
5086 case 0x62: /* fsadd */
5087 gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5088 break;
5089 case 0x66: /* fdadd */
5090 gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5091 break;
5092 case 0x23: /* fmul */
5093 gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5094 break;
5095 case 0x63: /* fsmul */
5096 gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5097 break;
5098 case 0x67: /* fdmul */
5099 gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5100 break;
5101 case 0x24: /* fsgldiv */
5102 gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5103 break;
5104 case 0x25: /* frem */
5105 gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
5106 break;
5107 case 0x26: /* fscale */
5108 gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
5109 break;
5110 case 0x27: /* fsglmul */
5111 gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5112 break;
5113 case 0x28: /* fsub */
5114 gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5115 break;
5116 case 0x68: /* fssub */
5117 gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5118 break;
5119 case 0x6c: /* fdsub */
5120 gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5121 break;
5122 case 0x30: case 0x31: case 0x32:
5123 case 0x33: case 0x34: case 0x35:
5124 case 0x36: case 0x37: {
5125 TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
5126 gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
5127 }
5128 break;
5129 case 0x38: /* fcmp */
5130 gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
5131 return;
5132 case 0x3a: /* ftst */
5133 gen_helper_ftst(cpu_env, cpu_src);
5134 return;
5135 default:
5136 goto undef;
5137 }
5138 gen_helper_ftst(cpu_env, cpu_dest);
5139 return;
5140 undef:
5141 /* FIXME: Is this right for offset addressing modes? */
5142 s->pc -= 2;
5143 disas_undef_fpu(env, s, insn);
5144 }
5145
/*
 * Translate an FPU condition code (the low 6 bits of the Fcc opcode or
 * extension word) into a DisasCompare: compare c->v1 against c->v2
 * (always zero) under c->tcond.
 *
 * Conditions 0-15 are the non-signaling predicates; 16-31 are the
 * "signaling" variants, which should additionally raise BSUN for
 * unordered operands (not implemented — see TODO below).  All
 * predicates are computed from the FPSR condition-code flags
 * (A, Z, N) loaded below.
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;

    c->v2 = tcg_constant_i32(0);
    /* TODO: Raise BSUN exception. */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->v1 = c->v2;
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr,
                         FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_EQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        c->v1 = tcg_temp_new();
        /* Shift the A flag up into the N bit position so that a single
           or/xor sequence can evaluate Z || !(A || N). */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        c->v1 = tcg_temp_new();
        /* Invert N first so all three terms can be tested with one mask. */
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        c->v1 = tcg_temp_new();
        /* Align A with the N bit, then clear N when A is set (N && !A). */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_EQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_NE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        c->v1 = tcg_temp_new();
        /* Align Z with the N bit so A || !(N || Z) reduces to or/xor. */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        c->v1 = tcg_temp_new();
        /* Align Z with the N bit, then clear N when Z is set (N && !Z). */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->v1 = c->v2;
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
}
5268
5269 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5270 {
5271 DisasCompare c;
5272
5273 gen_fcc_cond(&c, s, cond);
5274 update_cc_op(s);
5275 tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5276 }
5277
/*
 * FBcc: conditional FP branch with 16- or 32-bit displacement,
 * relative to the address of the displacement word (base).
 */
DISAS_INSN(fbcc)
{
    uint32_t offset;
    uint32_t base;
    TCGLabel *l1;

    base = s->pc;
    /* 16-bit displacement, sign-extended. */
    offset = (int16_t)read_im16(env, s);
    if (insn & (1 << 6)) {
        /* Bit 6 selects a 32-bit displacement: the word already read
           is the high half. */
        offset = (offset << 16) | read_im16(env, s);
    }

    l1 = gen_new_label();
    update_cc_op(s);
    gen_fjmpcc(s, insn & 0x3f, l1);
    /* Condition false: continue with the next sequential insn. */
    gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
    gen_set_label(l1);
    /* Condition true: take the branch. */
    gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
}
5297
5298 DISAS_INSN(fscc)
5299 {
5300 DisasCompare c;
5301 int cond;
5302 TCGv tmp;
5303 uint16_t ext;
5304
5305 ext = read_im16(env, s);
5306 cond = ext & 0x3f;
5307 gen_fcc_cond(&c, s, cond);
5308
5309 tmp = tcg_temp_new();
5310 tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);
5311
5312 DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5313 }
5314
/*
 * FTRAPcc: trap when an FP condition holds.  The word/long immediate
 * operand (selected by the opmode in the insn's low 3 bits) is
 * consumed but otherwise ignored.
 */
DISAS_INSN(ftrapcc)
{
    DisasCompare c;
    uint16_t ext;
    int cond;

    ext = read_im16(env, s);
    cond = ext & 0x3f;

    /* Consume and discard the immediate operand. */
    switch (extract32(insn, 0, 3)) {
    case 2: /* ftrapcc.w */
        (void)read_im16(env, s);
        break;
    case 3: /* ftrapcc.l */
        (void)read_im32(env, s);
        break;
    case 4: /* ftrapcc (no operand) */
        break;
    default:
        /* ftrapcc registered with only valid opmodes */
        g_assert_not_reached();
    }

    gen_fcc_cond(&c, s, cond);
    do_trapcc(s, &c);
}
5342
5343 #if !defined(CONFIG_USER_ONLY)
/*
 * FRESTORE: reload FPU internal state from memory.  Privileged.
 * Only the 68040 form is handled; the state frame contents are
 * consumed but not validated (see FIXME).
 */
DISAS_INSN(frestore)
{
    TCGv addr;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
        /* Consume the EA operand (the frame's format word). */
        SRC_EA(env, addr, OS_LONG, 0, NULL);
        /* FIXME: check the state frame */
    } else {
        disas_undef(env, s, insn);
    }
}
5359
5360 DISAS_INSN(fsave)
5361 {
5362 if (IS_USER(s)) {
5363 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5364 return;
5365 }
5366
5367 if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5368 /* always write IDLE */
5369 TCGv idle = tcg_constant_i32(0x41000000);
5370 DEST_EA(env, insn, OS_LONG, idle, NULL);
5371 } else {
5372 disas_undef(env, s, insn);
5373 }
5374 }
5375 #endif
5376
5377 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5378 {
5379 TCGv tmp = tcg_temp_new();
5380 if (s->env->macsr & MACSR_FI) {
5381 if (upper)
5382 tcg_gen_andi_i32(tmp, val, 0xffff0000);
5383 else
5384 tcg_gen_shli_i32(tmp, val, 16);
5385 } else if (s->env->macsr & MACSR_SU) {
5386 if (upper)
5387 tcg_gen_sari_i32(tmp, val, 16);
5388 else
5389 tcg_gen_ext16s_i32(tmp, val);
5390 } else {
5391 if (upper)
5392 tcg_gen_shri_i32(tmp, val, 16);
5393 else
5394 tcg_gen_ext16u_i32(tmp, val);
5395 }
5396 return tmp;
5397 }
5398
/* Clear the MACSR result flags (V, Z, N, EV) before a MAC operation. */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
5404
/*
 * ColdFire (E)MAC multiply-accumulate, optionally combined with a
 * parallel memory load ("MAC with load").  The extension word selects
 * the operand halves, a post-multiply shift, and — on EMAC-B — a
 * second accumulator for the dual-accumulate form.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Lazily allocate the 64-bit product temporary, once per TB. */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    /* Accumulator index: insn bit 7 is the low bit, ext bit 4 the high. */
    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    /* Dual-accumulate form only exists with the EMAC-B feature. */
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load. */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /*
         * Load the value now to ensure correct exception behavior.
         * Perform writeback after reading the MAC inputs.
         */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        acc ^= 1;
        /* Operands come from the extension word in the load form. */
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word-sized operands: ext bits 7/6 pick upper or lower halves. */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Post-multiply shift selected by ext bits 10:9. */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply. */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* Insn bit 8 selects MSAC (subtract) versus MAC (add). */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant. */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier. */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated. */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(acc));

    if (insn & 0x30) {
        /* Complete the parallel load: register writeback, then address
           register update for the post-increment/pre-decrement modes. */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /*
         * FIXME: Should address writeback happen with the masked or
         * unmasked value?
         */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment. */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
5572
/*
 * MOVE from MACC: read an accumulator into a data/address register,
 * rounding or saturating according to the MACSR mode.  Bit 6 of the
 * insn additionally clears the accumulator and its PAV flag.
 */
DISAS_INSN(from_mac)
{
    TCGv rx;
    TCGv_i64 acc;
    int accnum;

    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_get_macf(rx, cpu_env, acc);
    } else if ((s->env->macsr & MACSR_OMC) == 0) {
        /* No overflow/saturation handling: simple truncation. */
        tcg_gen_extrl_i64_i32(rx, acc);
    } else if (s->env->macsr & MACSR_SU) {
        gen_helper_get_macs(rx, acc);
    } else {
        gen_helper_get_macu(rx, acc);
    }
    if (insn & 0x40) {
        /* MOVclr form: clear the accumulator and its PAV flag. */
        tcg_gen_movi_i64(acc, 0);
        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    }
}
5596
5597 DISAS_INSN(move_mac)
5598 {
5599 /* FIXME: This can be done without a helper. */
5600 int src;
5601 TCGv dest;
5602 src = insn & 3;
5603 dest = tcg_constant_i32((insn >> 9) & 3);
5604 gen_helper_mac_move(cpu_env, dest, tcg_constant_i32(src));
5605 gen_mac_clear_flags();
5606 gen_helper_mac_set_flags(cpu_env, dest);
5607 }
5608
5609 DISAS_INSN(from_macsr)
5610 {
5611 TCGv reg;
5612
5613 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5614 tcg_gen_mov_i32(reg, QREG_MACSR);
5615 }
5616
5617 DISAS_INSN(from_mask)
5618 {
5619 TCGv reg;
5620 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5621 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5622 }
5623
5624 DISAS_INSN(from_mext)
5625 {
5626 TCGv reg;
5627 TCGv acc;
5628 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5629 acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5630 if (s->env->macsr & MACSR_FI)
5631 gen_helper_get_mac_extf(reg, cpu_env, acc);
5632 else
5633 gen_helper_get_mac_exti(reg, cpu_env, acc);
5634 }
5635
/* Copy the MACSR condition flags into the integer CCR. */
DISAS_INSN(macsr_to_ccr)
{
    TCGv tmp = tcg_temp_new();

    /* Note that X and C are always cleared. */
    tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
    gen_helper_set_ccr(cpu_env, tmp);
    set_cc_op(s, CC_OP_FLAGS);
}
5645
/*
 * MOVE to MACC: load an accumulator from a 32-bit EA operand,
 * positioning/extending the value according to the MACSR mode,
 * and refresh the MACSR flags.
 */
DISAS_INSN(to_mac)
{
    TCGv_i64 acc;
    TCGv val;
    int accnum;
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    SRC_EA(env, val, OS_LONG, 0, NULL);
    if (s->env->macsr & MACSR_FI) {
        /* Fractional mode: value sits 8 bits up in the accumulator. */
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_shli_i64(acc, acc, 8);
    } else if (s->env->macsr & MACSR_SU) {
        tcg_gen_ext_i32_i64(acc, val);
    } else {
        tcg_gen_extu_i32_i64(acc, val);
    }
    /* A freshly-loaded accumulator is no longer saturated. */
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(accnum));
}
5666
/*
 * MOVE to MACSR.  MACSR mode bits steer translation (see the
 * s->env->macsr checks above), so end the TB afterwards.
 */
DISAS_INSN(to_macsr)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    gen_helper_set_macsr(cpu_env, val);
    gen_exit_tb(s);
}
5674
/* MOVE to MASK: the upper 16 mask bits always read as ones. */
DISAS_INSN(to_mask)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
}
5681
5682 DISAS_INSN(to_mext)
5683 {
5684 TCGv val;
5685 TCGv acc;
5686 SRC_EA(env, val, OS_LONG, 0, NULL);
5687 acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5688 if (s->env->macsr & MACSR_FI)
5689 gen_helper_set_mac_extf(cpu_env, val, acc);
5690 else if (s->env->macsr & MACSR_SU)
5691 gen_helper_set_mac_exts(cpu_env, val, acc);
5692 else
5693 gen_helper_set_mac_extu(cpu_env, val, acc);
5694 }
5695
5696 static disas_proc opcode_table[65536];
5697
5698 static void
5699 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5700 {
5701 int i;
5702 int from;
5703 int to;
5704
5705 /* Sanity check. All set bits must be included in the mask. */
5706 if (opcode & ~mask) {
5707 fprintf(stderr,
5708 "qemu internal error: bogus opcode definition %04x/%04x\n",
5709 opcode, mask);
5710 abort();
5711 }
5712 /*
5713 * This could probably be cleverer. For now just optimize the case where
5714 * the top bits are known.
5715 */
5716 /* Find the first zero bit in the mask. */
5717 i = 0x8000;
5718 while ((i & mask) != 0)
5719 i >>= 1;
5720 /* Iterate over all combinations of this and lower bits. */
5721 if (i == 0)
5722 i = 1;
5723 else
5724 i <<= 1;
5725 from = opcode & ~(i - 1);
5726 to = from + i;
5727 for (i = from; i < to; i++) {
5728 if ((i & mask) == opcode)
5729 opcode_table[i] = proc;
5730 }
5731 }
5732
5733 /*
5734 * Register m68k opcode handlers. Order is important.
5735 * Later insn override earlier ones.
5736 */
void register_m68k_insns (CPUM68KState *env)
{
    /*
     * Build the opcode table only once to avoid
     * multithreading issues.
     */
    if (opcode_table[0] != NULL) {
        return;
    }

    /*
     * use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    /* Line group 0xxx: immediate arithmetic, bit ops, MOVEP, CAS. */
    BASE(undef, 0000, 0000);
    INSN(arith_im, 0080, fff8, CF_ISA_A);
    INSN(arith_im, 0000, ff00, M68K);
    INSN(chk2, 00c0, f9c0, CHK2);
    INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep, 0108, f138, MOVEP);
    INSN(arith_im, 0280, fff8, CF_ISA_A);
    INSN(arith_im, 0200, ff00, M68K);
    INSN(undef, 02c0, ffc0, M68K);
    INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0480, fff8, CF_ISA_A);
    INSN(arith_im, 0400, ff00, M68K);
    INSN(undef, 04c0, ffc0, M68K);
    INSN(arith_im, 0600, ff00, M68K);
    INSN(undef, 06c0, ffc0, M68K);
    INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0680, fff8, CF_ISA_A);
    INSN(arith_im, 0c00, ff38, CF_ISA_A);
    INSN(arith_im, 0c00, ff00, M68K);
    BASE(bitop_im, 0800, ffc0);
    BASE(bitop_im, 0840, ffc0);
    BASE(bitop_im, 0880, ffc0);
    BASE(bitop_im, 08c0, ffc0);
    INSN(arith_im, 0a80, fff8, CF_ISA_A);
    INSN(arith_im, 0a00, ff00, M68K);
#if !defined(CONFIG_USER_ONLY)
    INSN(moves, 0e00, ff00, M68K);
#endif
    INSN(cas, 0ac0, ffc0, CAS);
    INSN(cas, 0cc0, ffc0, CAS);
    INSN(cas, 0ec0, ffc0, CAS);
    INSN(cas2w, 0cfc, ffff, CAS);
    INSN(cas2l, 0efc, ffff, CAS);
    /* Line groups 1xxx-3xxx: MOVE. */
    BASE(move, 1000, f000);
    BASE(move, 2000, f000);
    BASE(move, 3000, f000);
    /* Line group 4xxx: miscellaneous. */
    INSN(chk, 4000, f040, M68K);
    INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
    INSN(negx, 4080, fff8, CF_ISA_A);
    INSN(negx, 4000, ff00, M68K);
    INSN(undef, 40c0, ffc0, M68K);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68K);
    BASE(lea, 41c0, f1c0);
    BASE(clr, 4200, ff00);
    BASE(undef, 42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68K);
    INSN(neg, 4480, fff8, CF_ISA_A);
    INSN(neg, 4400, ff00, M68K);
    INSN(undef, 44c0, ffc0, M68K);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not, 4680, fff8, CF_ISA_A);
    INSN(not, 4600, ff00, M68K);
#if !defined(CONFIG_USER_ONLY)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd, 4800, ffc0, M68K);
    INSN(linkl, 4808, fff8, M68K);
    BASE(pea, 4840, ffc0);
    BASE(swap, 4840, fff8);
    INSN(bkpt, 4848, fff8, BKPT);
    INSN(movem, 48d0, fbf8, CF_ISA_A);
    INSN(movem, 48e8, fbf8, CF_ISA_A);
    INSN(movem, 4880, fb80, M68K);
    BASE(ext, 4880, fff8);
    BASE(ext, 48c0, fff8);
    BASE(ext, 49c0, fff8);
    BASE(tst, 4a00, ff00);
    INSN(tas, 4ac0, ffc0, CF_ISA_B);
    INSN(tas, 4ac0, ffc0, M68K);
#if !defined(CONFIG_USER_ONLY)
    INSN(halt, 4ac8, ffff, CF_ISA_A);
    INSN(halt, 4ac8, ffff, M68K);
#endif
    INSN(pulse, 4acc, ffff, CF_ISA_A);
    BASE(illegal, 4afc, ffff);
    INSN(mull, 4c00, ffc0, CF_ISA_A);
    INSN(mull, 4c00, ffc0, LONG_MULDIV);
    INSN(divl, 4c40, ffc0, CF_ISA_A);
    INSN(divl, 4c40, ffc0, LONG_MULDIV);
    INSN(sats, 4c80, fff8, CF_ISA_B);
    BASE(trap, 4e40, fff0);
    BASE(link, 4e50, fff8);
    BASE(unlk, 4e58, fff8);
#if !defined(CONFIG_USER_ONLY)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset, 4e70, ffff, M68K);
    BASE(stop, 4e72, ffff);
    BASE(rte, 4e73, ffff);
    INSN(cf_movec, 4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, MOVEC);
#endif
    BASE(nop, 4e71, ffff);
    INSN(rtd, 4e74, ffff, RTD);
    BASE(rts, 4e75, ffff);
    INSN(trapv, 4e76, ffff, M68K);
    INSN(rtr, 4e77, ffff, M68K);
    BASE(jump, 4e80, ffc0);
    BASE(jump, 4ec0, ffc0);
    /* Line group 5xxx: ADDQ/SUBQ, Scc, DBcc, TRAPcc. */
    INSN(addsubq, 5000, f080, M68K);
    BASE(addsubq, 5080, f0c0);
    INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
    INSN(scc, 50c0, f0c0, M68K); /* Scc.B <EA> */
    INSN(dbcc, 50c8, f0f8, M68K);
    INSN(trapcc, 50fa, f0fe, TRAPCC); /* opmode 010, 011 */
    INSN(trapcc, 50fc, f0ff, TRAPCC); /* opmode 100 */
    INSN(trapcc, 51fa, fffe, CF_ISA_A); /* TPF (trapf) opmode 010, 011 */
    INSN(trapcc, 51fc, ffff, CF_ISA_A); /* TPF (trapf) opmode 100 */

    /* Branch instructions. */
    BASE(branch, 6000, f000);
    /* Disable long branch instructions, then add back the ones we want. */
    BASE(undef, 60ff, f0ff); /* All long branches. */
    INSN(branch, 60ff, f0ff, CF_ISA_B);
    INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch, 60ff, ffff, BRAL);
    INSN(branch, 60ff, f0ff, BCCL);

    BASE(moveq, 7000, f100);
    INSN(mvzs, 7100, f100, CF_ISA_B);
    BASE(or, 8000, f000);
    BASE(divw, 80c0, f0c0);
    INSN(sbcd_reg, 8100, f1f8, M68K);
    INSN(sbcd_mem, 8108, f1f8, M68K);
    BASE(addsub, 9000, f000);
    INSN(undef, 90c0, f0c0, CF_ISA_A);
    INSN(subx_reg, 9180, f1f8, CF_ISA_A);
    INSN(subx_reg, 9100, f138, M68K);
    INSN(subx_mem, 9108, f138, M68K);
    INSN(suba, 91c0, f1c0, CF_ISA_A);
    INSN(suba, 90c0, f0c0, M68K);

    /* Line group axxx: ColdFire EMAC. */
    BASE(undef_mac, a000, f000);
    INSN(mac, a000, f100, CF_EMAC);
    INSN(from_mac, a180, f9b0, CF_EMAC);
    INSN(move_mac, a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac, a100, f9c0, CF_EMAC);
    INSN(to_macsr, a900, ffc0, CF_EMAC);
    INSN(to_mext, ab00, fbc0, CF_EMAC);
    INSN(to_mask, ad00, ffc0, CF_EMAC);

    INSN(mov3q, a140, f1c0, CF_ISA_B);
    INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp, b080, f1c0, CF_ISA_A);
    INSN(cmpa, b1c0, f1c0, CF_ISA_A);
    INSN(cmp, b000, f100, M68K);
    INSN(eor, b100, f100, M68K);
    INSN(cmpm, b108, f138, M68K);
    INSN(cmpa, b0c0, f0c0, M68K);
    INSN(eor, b180, f1c0, CF_ISA_A);
    BASE(and, c000, f000);
    INSN(exg_dd, c140, f1f8, M68K);
    INSN(exg_aa, c148, f1f8, M68K);
    INSN(exg_da, c188, f1f8, M68K);
    BASE(mulw, c0c0, f0c0);
    INSN(abcd_reg, c100, f1f8, M68K);
    INSN(abcd_mem, c108, f1f8, M68K);
    BASE(addsub, d000, f000);
    INSN(undef, d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg, d180, f1f8, CF_ISA_A);
    INSN(addx_reg, d100, f138, M68K);
    INSN(addx_mem, d108, f138, M68K);
    INSN(adda, d1c0, f1c0, CF_ISA_A);
    INSN(adda, d0c0, f0c0, M68K);
    /* Line group exxx: shifts, rotates, bitfields. */
    INSN(shift_im, e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68K);
    INSN(shift16_im, e040, f0f0, M68K);
    INSN(shift_im, e080, f0f0, M68K);
    INSN(shift8_reg, e020, f0f0, M68K);
    INSN(shift16_reg, e060, f0f0, M68K);
    INSN(shift_reg, e0a0, f0f0, M68K);
    INSN(shift_mem, e0c0, fcc0, M68K);
    INSN(rotate_im, e090, f0f0, M68K);
    INSN(rotate8_im, e010, f0f0, M68K);
    INSN(rotate16_im, e050, f0f0, M68K);
    INSN(rotate_reg, e0b0, f0f0, M68K);
    INSN(rotate8_reg, e030, f0f0, M68K);
    INSN(rotate16_reg, e070, f0f0, M68K);
    INSN(rotate_mem, e4c0, fcc0, M68K);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD); /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD); /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD); /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD); /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD); /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD); /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD); /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD); /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD); /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD); /* bftst */
    /* Line group fxxx: FPU and system control. */
    BASE(undef_fpu, f000, f000);
    INSN(fpu, f200, ffc0, CF_FPU);
    INSN(fbcc, f280, ffc0, CF_FPU);
    INSN(fpu, f200, ffc0, FPU);
    INSN(fscc, f240, ffc0, FPU);
    INSN(ftrapcc, f27a, fffe, FPU); /* opmode 010, 011 */
    INSN(ftrapcc, f27c, ffff, FPU); /* opmode 100 */
    INSN(fbcc, f280, ff80, FPU);
#if !defined(CONFIG_USER_ONLY)
    INSN(frestore, f340, ffc0, CF_FPU);
    INSN(fsave, f300, ffc0, CF_FPU);
    INSN(frestore, f340, ffc0, FPU);
    INSN(fsave, f300, ffc0, FPU);
    INSN(intouch, f340, ffc0, CF_ISA_A);
    INSN(cpushl, f428, ff38, CF_ISA_A);
    INSN(cpush, f420, ff20, M68040);
    INSN(cinv, f400, ff20, M68040);
    INSN(pflush, f500, ffe0, M68040);
    INSN(ptest, f548, ffd8, M68040);
    INSN(wddata, fb00, ff00, CF_ISA_A);
    INSN(wdebug, fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
5989
/* Set up per-TB translation state before decoding begins. */
static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu->env_ptr;

    dc->env = env;
    dc->pc = dc->base.pc_first;
    /* This value will always be filled in properly before m68k_tr_tb_stop. */
    dc->pc_prev = 0xdeadbeef;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_synced = 1;
    dc->done_mac = 0;
    dc->writeback_mask = 0;

    dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
    /* If architectural single step active, limit to 1 */
    if (dc->ss_active) {
        dc->base.max_insns = 1;
    }
}
6010
/* No per-TB prologue work is needed for m68k. */
static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
}
6014
/* Record pc and cc_op state at each insn for exception restore. */
static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
6020
/*
 * Translate one instruction: dispatch on the first opcode word through
 * the 64K-entry handler table, then flush any deferred EA writebacks.
 */
static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu->env_ptr;
    uint16_t insn = read_im16(env, dc);

    opcode_table[insn](env, dc, insn);
    do_writebacks(dc);

    dc->pc_prev = dc->base.pc_next;
    dc->base.pc_next = dc->pc;

    if (dc->base.is_jmp == DISAS_NEXT) {
        /*
         * Stop translation when the next insn might touch a new page.
         * This ensures that prefetch aborts at the right place.
         *
         * We cannot determine the size of the next insn without
         * completely decoding it.  However, the maximum insn size
         * is 32 bytes, so end if we do not have that much remaining.
         * This may produce several small TBs at the end of each page,
         * but they will all be linked with goto_tb.
         *
         * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
         * smaller than MC68020's.
         */
        target_ulong start_page_offset
            = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);

        if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6055
/* Emit the end-of-TB code appropriate to how translation stopped. */
static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* An exception or similar was already emitted; nothing to do. */
        break;
    case DISAS_TOO_MANY:
        /* Fell off the end of the TB: chain to the next insn. */
        update_cc_op(dc);
        gen_jmp_tb(dc, 0, dc->pc, dc->pc_prev);
        break;
    case DISAS_JUMP:
        /* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */
        if (dc->ss_active) {
            gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    case DISAS_EXIT:
        /*
         * We updated CC_OP and PC in gen_exit_tb, but also modified
         * other state that may require returning to the main loop.
         */
        if (dc->ss_active) {
            gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6090
/* Log the guest disassembly of the translated block. */
static void m68k_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
6097
/* Callbacks wired into the generic translator loop. */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start           = m68k_tr_tb_start,
    .insn_start         = m68k_tr_insn_start,
    .translate_insn     = m68k_tr_translate_insn,
    .tb_stop            = m68k_tr_tb_stop,
    .disas_log          = m68k_tr_disas_log,
};
6106
/* Entry point: translate one guest TB via the generic translator loop. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
}
6113
/*
 * Convert a raw 80-bit FP register (exponent word + mantissa) to a
 * host double, for register-dump display only.
 */
static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
{
    floatx80 a = { .high = high, .low = low };
    union {
        float64 f64;
        double d;
    } u;

    /* Convert via softfloat, then type-pun the float64 bit pattern to a
       host double through a union. */
    u.f64 = floatx80_to_float64(a, &env->fp_status);
    return u.d;
}
6125
/*
 * Dump the CPU state in human-readable form to @f: data/address/FP
 * registers, PC, SR (with live CCR merged in), FPSR condition codes,
 * FPCR precision/rounding mode, and — in system-mode builds — the
 * banked A7 stack pointers and MMU registers.
 */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    /* One line per index: Dn, An and FPn (raw 80-bit image + double view). */
    for (i = 0; i < 8; i++) {
        qemu_fprintf(f, "D%d = %08x A%d = %08x "
                     "F%d = %04x %016"PRIx64" (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                     floatx80_to_double(env, env->fregs[i].l.upper,
                                        env->fregs[i].l.lower));
    }
    qemu_fprintf(f, "PC = %08x ", env->pc);
    /* Merge the lazily-computed CCR bits into the architectural SR. */
    sr = env->sr | cpu_m68k_get_ccr(env);
    qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                 (sr & CCF_C) ? 'C' : '-');
    qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    qemu_fprintf(f, "\n "
                 "FPCR = %04x ", env->fpcr);
    /* FPCR rounding-precision field: extended / single / double. */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        qemu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        qemu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        qemu_fprintf(f, "D ");
        break;
    }
    /* FPCR rounding-mode field: nearest / toward zero / minus / plus. */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        qemu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        qemu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        qemu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        qemu_fprintf(f, "RP ");
        break;
    }
    qemu_fprintf(f, "\n");
#ifndef CONFIG_USER_ONLY
    /* System mode only: banked A7s (arrow marks the active one) and MMU. */
    qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                 env->current_sp == M68K_SSP ? "->" : " ", env->sp[M68K_SSP],
                 env->current_sp == M68K_USP ? "->" : " ", env->sp[M68K_USP],
                 env->current_sp == M68K_ISP ? "->" : " ", env->sp[M68K_ISP]);
    qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                 env->mmu.mmusr, env->mmu.ar);
#endif /* !CONFIG_USER_ONLY */
}