]> git.proxmox.com Git - mirror_qemu.git/blob - target-s390x/translate.c
target-s390: Convert MVC
[mirror_qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/* Per-translation-block disassembly state, threaded through every
   translation routine below. */
struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;       /* descriptor of the current insn, if any */
    DisasFields *fields;         /* decoded operand fields of the insn */
    uint64_t pc, next_pc;        /* guest address of the insn / its successor */
    enum cc_op cc_op;            /* translation-time view of how the cc is
                                    currently represented (may be deferred) */
    bool singlestep_enabled;
    int is_jmp;                  /* how/whether translation has terminated */
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;                  /* true: operands in u.s64, else u.s32 */
    bool g1;                     /* first operand is a TCG global: don't free */
    bool g2;                     /* second operand is a TCG global: don't free */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

/* is_jmp value: translation ended because an exception was raised */
#define DISAS_EXCP 4

static void gen_op_calc_cc(DisasContext *s);

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches evaluated inline vs. via helper */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
81
/* Trace the raw instruction word; a no-op unless verbose disassembly
   logging (S390X_DEBUG_DISAS_VERBOSE) is compiled in. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
86
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
99 {
100 int i;
101
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
108 }
109
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
116 }
117 }
118
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
125 }
126 }
127
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
135 }
136 }
137 #endif
138
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
143 }
144 #endif
145
146 cpu_fprintf(f, "\n");
147 }
148
/* TCG globals mapped onto env->psw.{addr,mask} */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* TCG globals backing the (possibly deferred) condition-code state */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Names and handles for the 16 general and 16 floating point registers */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* One translation-time cc_op value per generated opcode slot */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
/* Register all TCG globals (env pointer, PSW, cc state, GPRs, FPRs)
   and the generated helpers with the TCG runtime.  Called once during
   CPU initialization. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* The name buffers must outlive translation, hence the static
       cpu_reg_names array: slots 0-15 are "r0".."r15", 16-31 "f0".."f15". */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
202
/* Return a fresh temporary holding the value of general register REG. */
static inline TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temporary holding the full 64-bit contents of
   floating point register REG. */
static inline TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, fregs[reg]);
    return r;
}

/* Return a fresh i32 temporary holding the short (32-bit) value of FP
   register REG, which lives in the high half of the 64-bit register. */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    /* On 64-bit hosts an i32 temp occupies a full i64 slot, so the
       index can be reinterpreted and the shift emitted directly. */
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}

/* As load_freg32, but return the (zero-extended) value in an i64. */
static inline TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

/* Return a fresh i32 temporary holding the low 32 bits of general
   register REG. */
static inline TCGv_i32 load_reg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(r, regs[reg]);
    return r;
}

/* Return a fresh i64 temporary holding the low 32 bits of general
   register REG, sign-extended to 64 bits. */
static inline TCGv_i64 load_reg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(r, regs[reg]);
    return r;
}
248
/* Store V into the whole of general register REG. */
static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into the whole of floating point register REG. */
static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

/* Store the i32 V into the low half of general register REG. */
static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    /* Reinterpret the i32 temp as an i64 so deposit can be used. */
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}

/* Store the low 32 bits of the i64 V into the low half of REG. */
static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of the i64 V into the HIGH half of REG. */
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store the low 16 bits of V into general register REG. */
static inline void store_reg16(int reg, TCGv_i32 v)
{
    /* 16 bit register writes keep the upper bytes */
#if HOST_LONG_BITS == 32
    tcg_gen_deposit_i32(TCGV_LOW(regs[reg]), TCGV_LOW(regs[reg]), v, 0, 16);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 16);
#endif
}

/* Store the short (32-bit) float V into FP register REG; the short
   value occupies the high half of the 64-bit register. */
static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}

/* As store_freg32, taking the short value in the low bits of an i64. */
static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low half of a 128-bit helper result from env->retxl. */
static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
312
/* Write the translation-time PC into the psw.addr global. */
static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Synchronize psw.addr and the condition code before an operation that
   may fault, so the exception handler sees consistent state.  Compiled
   out for user-only emulation. */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}

/* Fetch 2 bytes of instruction text at guest address PC. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch 4 bytes of instruction text at guest address PC. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch a 6-byte instruction: the 2-byte opcode halfword followed by
   4 more bytes, left-packed into the low 48 bits of the result. */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
341
342 static inline int get_mem_index(DisasContext *s)
343 {
344 switch (s->tb->flags & FLAG_MASK_ASC) {
345 case PSW_ASC_PRIMARY >> 32:
346 return 0;
347 case PSW_ASC_SECONDARY >> 32:
348 return 1;
349 case PSW_ASC_HOME >> 32:
350 return 2;
351 default:
352 tcg_abort();
353 break;
354 }
355 }
356
357 static void gen_exception(int excp)
358 {
359 TCGv_i32 tmp = tcg_const_i32(excp);
360 gen_helper_exception(cpu_env, tmp);
361 tcg_temp_free_i32(tmp);
362 }
363
/* Raise a program exception with interruption code CODE: record the
   code and instruction length in env, advance the PSW past the current
   instruction, materialize the cc, and end the TB. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* The instruction length code is derived from the insn's size. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    gen_op_calc_cc(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);

    /* End TB here. */
    s->is_jmp = DISAS_EXCP;
}
390
/* Raise a program exception for an unimplemented or invalid opcode.
   NOTE(review): z/Architecture specifies an *operation* exception
   (PGM_OPERATION) for invalid operation codes; confirm whether
   PGM_SPECIFICATION here is intentional. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

/* Raise a privileged-operation exception when the TB was translated
   in problem state (PSW P bit set in the TB flags). */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
402
/* Compute the effective address base(B2) + index(X2) + displacement D2
   into a fresh temporary.  Register number 0 means "no register" for
   both base and index, per the architecture.  In 31-bit mode the
   result is truncated to 31 bits. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        /* Neither base nor index: the displacement alone is the address. */
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
440
/* Record that the condition code is the constant VAL (0..3). */
static void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}

/* Record a deferred one-operand cc computation: only cc_dst carries
   data, cc_src/cc_vr are discarded so TCG can drop dead stores. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* As gen_op_update1_cc_i64, with a 32-bit operand (zero-extended). */
static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Record a deferred two-operand cc computation in cc_src/cc_dst. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* As gen_op_update2_cc_i64, with 32-bit operands (zero-extended). */
static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Record a deferred three-operand cc computation (e.g. add/sub with
   carry, which needs both inputs and the result). */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* cc = (val != 0), 32-bit operand. */
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}

/* cc = (val != 0), 64-bit operand. */
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
498
/* Defer a 32-bit comparison of V1 and V2 under comparison kind COND. */
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}

/* Defer a 64-bit comparison of V1 and V2 under comparison kind COND. */
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}

/* Signed 32-bit compare. */
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}

/* Unsigned 32-bit compare. */
static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}

/* Signed 32-bit compare against the immediate V2. */
static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
{
    /* XXX optimize for the constant? put it in s? */
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
    tcg_temp_free_i32(tmp);
}

/* Unsigned 32-bit compare against the immediate V2. */
static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
{
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
    tcg_temp_free_i32(tmp);
}

/* Signed 64-bit compare. */
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}

/* Unsigned 64-bit compare. */
static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}

/* Signed 64-bit compare against the immediate V2. */
static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_s64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* Unsigned 64-bit compare against the immediate V2. */
static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_u64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* cc = sign of VAL (compare against zero), 32-bit. */
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}

/* cc = sign of VAL (compare against zero), 64-bit. */
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}

/* Defer a short-float comparison; V1 is the 32-bit float value and V2
   holds the second operand in an i64. */
static void set_cc_cmp_f32_i64(DisasContext *s, TCGv_i32 v1, TCGv_i64 v2)
{
    tcg_gen_extu_i32_i64(cc_src, v1);
    tcg_gen_mov_i64(cc_dst, v2);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_LTGT_F32;
}

/* cc from a short-float result (zero / nonzero classes). */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i32 v1)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ_F32, v1);
}
582
/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    /* The cc has been materialized into the cc_op global, so any
       pending src/dst/vr values are dead. */
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}

/* Write the translation-time cc_op into the cc_op global, unless it is
   already there (STATIC) or only known at runtime (DYNAMIC). */
static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Flush translation-time cc state before leaving the TB. */
static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
603
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_LTGT_F32:
    case CC_OP_LTGT_F64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
678
/* Decode an RR-format instruction: two 4-bit register fields. */
static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}

/* Decode an RX-format instruction (register + indexed storage operand)
   and return the computed effective address. */
static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;          /* unsigned 12-bit displacement */

    return get_address(s, *x2, *b2, *d2);
}

/* Decode an RS-format instruction: two register fields plus a
   base/displacement storage operand. */
static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}

/* Decode an SI-format instruction (8-bit immediate + storage operand)
   and return the computed effective address. */
static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}
723
724 static int use_goto_tb(DisasContext *s, uint64_t dest)
725 {
726 /* NOTE: we handle the case where the TB spans two pages here */
727 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
728 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
729 && !s->singlestep_enabled
730 && !(s->tb->cflags & CF_LAST_IO));
731 }
732
/* Emit a jump to guest address PC, using direct TB chaining in slot
   TB_NUM when use_goto_tb allows it; otherwise exit to the main loop. */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    /* Make the cc state visible before leaving the TB. */
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        /* Returning tb|tb_num identifies the chain slot to patch. */
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}
747
/* Count a conditional branch whose cc had to be computed out of line
   (statistics only; compiled out unless DEBUG_INLINE_BRANCHES). */
static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Count a conditional branch evaluated inline from the deferred cc. */
static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
761
/* Table of mask values to comparison codes, given a comparison as input.
   The index is the 4-bit branch mask (8=EQ, 4=LT, 2=GT, low bit unused
   by true comparisons).  For a true comparison CC=3 will never be set,
   but we treat this conservatively for possible use when CC=3 indicates
   overflow. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
788
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask 15 is branch-always, mask 0 branch-never; neither needs any
       operands, but point a/b at a global so free_compare is a no-op. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same mask table, but the comparison must be unsigned. */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: only the all-zero (cc 0) and mixed/all-one
           masks can be evaluated inline. */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare the masked value (src & dst) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The cc value is already in the cc_op global; derive a direct
           comparison on it for each possible mask. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
996
997 static void free_compare(DisasCompare *c)
998 {
999 if (!c->g1) {
1000 if (c->is_64) {
1001 tcg_temp_free_i64(c->u.s64.a);
1002 } else {
1003 tcg_temp_free_i32(c->u.s32.a);
1004 }
1005 }
1006 if (!c->g2) {
1007 if (c->is_64) {
1008 tcg_temp_free_i64(c->u.s64.b);
1009 } else {
1010 tcg_temp_free_i32(c->u.s32.b);
1011 }
1012 }
1013 }
1014
/* COMPARE LOGICAL (CLC): set the cc from an unsigned comparison of two
   memory operands of L+1 bytes (L is the instruction's length code).
   The 1/2/4/8-byte cases are inlined; other lengths call the helper. */
static void gen_op_clc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 tmp;
    TCGv_i64 tmp2;
    TCGv_i32 vl;

    /* check for simple 32bit or 64bit match */
    switch (l) {
    case 0:        /* 1-byte operands */
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld8u(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld8u(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    case 1:        /* 2-byte operands */
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld16u(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld16u(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    case 3:        /* 4-byte operands */
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld32u(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld32u(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    case 7:        /* 8-byte operands */
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld64(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld64(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    }

    /* General case: helper compares byte by byte and may fault. */
    potential_page_fault(s);
    vl = tcg_const_i32(l);
    gen_helper_clc(cc_op, cpu_env, vl, s1, s2);
    tcg_temp_free_i32(vl);
    set_cc_static(s);
}
1075
/* Translate the remaining 0xe3-prefixed (RXY-format) instructions:
   byte-reversed and zero-extending loads/stores. */
static void disas_e3(CPUS390XState *env, DisasContext* s, int op, int r1,
                     int x2, int b2, int d2)
{
    TCGv_i64 addr, tmp2;
    TCGv_i32 tmp32_1;

    LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
              op, r1, x2, b2, d2);
    addr = get_address(s, x2, b2, d2);
    switch (op) {
    case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
        /* Load 8 bytes byte-reversed. */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
        tcg_gen_bswap64_i64(tmp2, tmp2);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
        /* Load 31 bits and zero-extend (bit 32 of the word is dropped). */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_andi_i64(tmp2, tmp2, 0x7fffffffULL);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
        /* Load 4 bytes byte-reversed into the low register half. */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
        /* Load 2 bytes byte-reversed into the low 16 register bits. */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap16_i32(tmp32_1, tmp32_1);
        store_reg16(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
        /* Store the low register word byte-reversed. */
        tmp32_1 = load_reg32(r1);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        break;
    default:
        LOG_DISAS("illegal e3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
    tcg_temp_free_i64(addr);
}
1136
1137 #ifndef CONFIG_USER_ONLY
/* Translate the 0xe5-prefixed (SSE-format) instructions: two
   base/displacement storage operands, system emulation only. */
static void disas_e5(CPUS390XState *env, DisasContext* s, uint64_t insn)
{
    TCGv_i64 tmp, tmp2;
    int op = (insn >> 32) & 0xff;

    /* First and second storage-operand addresses. */
    tmp = get_address(s, 0, (insn >> 28) & 0xf, (insn >> 16) & 0xfff);
    tmp2 = get_address(s, 0, (insn >> 12) & 0xf, insn & 0xfff);

    LOG_DISAS("disas_e5: insn %" PRIx64 "\n", insn);
    switch (op) {
    case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
        /* Test Protection */
        potential_page_fault(s);
        gen_helper_tprot(cc_op, tmp, tmp2);
        set_cc_static(s);
        break;
    default:
        LOG_DISAS("illegal e5 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(tmp2);
}
1163 #endif
1164
/* Translate the remaining 0xeb-prefixed (RSY-format) instructions:
   multi-register storage ops, control-register access and the
   compare-and-swap family. */
static void disas_eb(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int r3, int b2, int d2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2;

    LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
              op, r1, r3, b2, d2);
    switch (op) {
    case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
        /* Store Characters under Mask High, via helper. */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stcmh(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
#ifndef CONFIG_USER_ONLY
    case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
        /* Load Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_lctlg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
        /* Store Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stctg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
#endif
    case 0x30: /* CSG R1,R3,D2(B2) [RSY] */
        /* 64-bit Compare and Swap, via helper. */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        /* XXX rewrite in tcg */
        gen_helper_csg(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */
        /* 128-bit Compare Double and Swap, via helper. */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        /* XXX rewrite in tcg */
        gen_helper_cdsg(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    default:
        LOG_DISAS("illegal eb operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
1240
1241 static void disas_ed(CPUS390XState *env, DisasContext *s, int op, int r1,
1242 int x2, int b2, int d2, int r1b)
1243 {
1244 TCGv_i32 tmp_r1, tmp32;
1245 TCGv_i64 addr, tmp;
1246 addr = get_address(s, x2, b2, d2);
1247 tmp_r1 = tcg_const_i32(r1);
1248 switch (op) {
1249 case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
1250 potential_page_fault(s);
1251 gen_helper_ldeb(cpu_env, tmp_r1, addr);
1252 break;
1253 case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
1254 potential_page_fault(s);
1255 gen_helper_lxdb(cpu_env, tmp_r1, addr);
1256 break;
1257 case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
1258 tmp = tcg_temp_new_i64();
1259 tmp32 = load_freg32(r1);
1260 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1261 set_cc_cmp_f32_i64(s, tmp32, tmp);
1262 tcg_temp_free_i64(tmp);
1263 tcg_temp_free_i32(tmp32);
1264 break;
1265 case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
1266 tmp = tcg_temp_new_i64();
1267 tmp32 = tcg_temp_new_i32();
1268 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1269 tcg_gen_trunc_i64_i32(tmp32, tmp);
1270 gen_helper_aeb(cpu_env, tmp_r1, tmp32);
1271 tcg_temp_free_i64(tmp);
1272 tcg_temp_free_i32(tmp32);
1273
1274 tmp32 = load_freg32(r1);
1275 gen_set_cc_nz_f32(s, tmp32);
1276 tcg_temp_free_i32(tmp32);
1277 break;
1278 case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
1279 tmp = tcg_temp_new_i64();
1280 tmp32 = tcg_temp_new_i32();
1281 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1282 tcg_gen_trunc_i64_i32(tmp32, tmp);
1283 gen_helper_seb(cpu_env, tmp_r1, tmp32);
1284 tcg_temp_free_i64(tmp);
1285 tcg_temp_free_i32(tmp32);
1286
1287 tmp32 = load_freg32(r1);
1288 gen_set_cc_nz_f32(s, tmp32);
1289 tcg_temp_free_i32(tmp32);
1290 break;
1291 case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
1292 tmp = tcg_temp_new_i64();
1293 tmp32 = tcg_temp_new_i32();
1294 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1295 tcg_gen_trunc_i64_i32(tmp32, tmp);
1296 gen_helper_deb(cpu_env, tmp_r1, tmp32);
1297 tcg_temp_free_i64(tmp);
1298 tcg_temp_free_i32(tmp32);
1299 break;
1300 case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
1301 potential_page_fault(s);
1302 gen_helper_tceb(cc_op, cpu_env, tmp_r1, addr);
1303 set_cc_static(s);
1304 break;
1305 case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
1306 potential_page_fault(s);
1307 gen_helper_tcdb(cc_op, cpu_env, tmp_r1, addr);
1308 set_cc_static(s);
1309 break;
1310 case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
1311 potential_page_fault(s);
1312 gen_helper_tcxb(cc_op, cpu_env, tmp_r1, addr);
1313 set_cc_static(s);
1314 break;
1315 case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
1316 tmp = tcg_temp_new_i64();
1317 tmp32 = tcg_temp_new_i32();
1318 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1319 tcg_gen_trunc_i64_i32(tmp32, tmp);
1320 gen_helper_meeb(cpu_env, tmp_r1, tmp32);
1321 tcg_temp_free_i64(tmp);
1322 tcg_temp_free_i32(tmp32);
1323 break;
1324 case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
1325 potential_page_fault(s);
1326 gen_helper_cdb(cc_op, cpu_env, tmp_r1, addr);
1327 set_cc_static(s);
1328 break;
1329 case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
1330 potential_page_fault(s);
1331 gen_helper_adb(cc_op, cpu_env, tmp_r1, addr);
1332 set_cc_static(s);
1333 break;
1334 case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
1335 potential_page_fault(s);
1336 gen_helper_sdb(cc_op, cpu_env, tmp_r1, addr);
1337 set_cc_static(s);
1338 break;
1339 case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
1340 potential_page_fault(s);
1341 gen_helper_mdb(cpu_env, tmp_r1, addr);
1342 break;
1343 case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
1344 potential_page_fault(s);
1345 gen_helper_ddb(cpu_env, tmp_r1, addr);
1346 break;
1347 case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
1348 /* for RXF insns, r1 is R3 and r1b is R1 */
1349 tmp32 = tcg_const_i32(r1b);
1350 potential_page_fault(s);
1351 gen_helper_madb(cpu_env, tmp32, addr, tmp_r1);
1352 tcg_temp_free_i32(tmp32);
1353 break;
1354 default:
1355 LOG_DISAS("illegal ed operation 0x%x\n", op);
1356 gen_illegal_opcode(s);
1357 return;
1358 }
1359 tcg_temp_free_i32(tmp_r1);
1360 tcg_temp_free_i64(addr);
1361 }
1362
/*
 * Decode B2-prefixed instructions.  RRE-format opcodes use the r1/r2
 * fields extracted below; S-format opcodes re-decode a base+displacement
 * operand from the raw instruction word via decode_rs().  Unknown
 * sub-opcodes raise an illegal-opcode program exception.
 */
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
                     uint32_t insn)
{
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    int r1, r2;
#ifndef CONFIG_USER_ONLY
    int r3, d2, b2;
#endif

    r1 = (insn >> 4) & 0xf;
    r2 = insn & 0xf;

    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);

    switch (op) {
    case 0x22: /* IPM R1 [RRE] */
        /* Insert Program Mask: materialize the condition code first so
           the helper can deposit it into r1.  */
        tmp32_1 = tcg_const_i32(r1);
        gen_op_calc_cc(s);
        gen_helper_ipm(cpu_env, cc_op, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x41: /* CKSM R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        /* cc is unconditionally forced to 0 after the helper returns.  */
        gen_op_movi_cc(s, 0);
        break;
    case 0x4e: /* SAR R1,R2 [RRE] */
        /* Set Access register a[r1] from general register r2.  */
        tmp32_1 = load_reg32(r2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x4f: /* EAR R1,R2 [RRE] */
        /* Extract Access register a[r2] into general register r1.  */
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x54: /* MVPG R1,R2 [RRE] */
        tmp = load_reg(0);
        tmp2 = load_reg(r1);
        tmp3 = load_reg(r2);
        potential_page_fault(s);
        gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        /* XXX check CCO bit and set CC accordingly */
        gen_op_movi_cc(s, 0);
        break;
    case 0x55: /* MVST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        /* cc is unconditionally forced to 1 ("complete") here.  */
        gen_op_movi_cc(s, 1);
        break;
    case 0x5d: /* CLST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x5e: /* SRST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;

#ifndef CONFIG_USER_ONLY
    case 0x02: /* STIDP D2(B2) [S] */
        /* Store CPU ID */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stidp(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x04: /* SCK D2(B2) [S] */
        /* Set Clock */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sck(cc_op, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x05: /* STCK D2(B2) [S] */
        /* Store Clock */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stck(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x06: /* SCKC D2(B2) [S] */
        /* Set Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x07: /* STCKC D2(B2) [S] */
        /* Store Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x08: /* SPT D2(B2) [S] */
        /* Set CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x09: /* STPT D2(B2) [S] */
        /* Store CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stpt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0a: /* SPKA D2(B2) [S] */
        /* Set PSW Key from Address: shift the operand address into the
           PSW key field and merge it into psw_mask inline.  */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
        tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
        tcg_gen_or_i64(psw_mask, tmp2, tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0d: /* PTLB [S] */
        /* Purge TLB */
        check_privileged(s);
        gen_helper_ptlb(cpu_env);
        break;
    case 0x10: /* SPX D2(B2) [S] */
        /* Set Prefix Register */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spx(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x11: /* STPX D2(B2) [S] */
        /* Store Prefix: write the low 32 bits of env->psa to storage.  */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x12: /* STAP D2(B2) [S] */
        /* Store CPU Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x21: /* IPTE R1,R2 [RRE] */
        /* Invalidate PTE */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r1);
        tmp2 = load_reg(r2);
        gen_helper_ipte(cpu_env, tmp, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x29: /* ISKE R1,R2 [RRE] */
        /* Insert Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r2);
        tmp2 = tcg_temp_new_i64();
        gen_helper_iske(tmp2, cpu_env, tmp);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Reset Reference Bit Extended.  (The previous comment here said
           "Set Storage Key Extended", copied from SSKE below.)  */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x2b: /* SSKE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_sske(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x34: /* STCH ? */
        /* Store Subchannel */
        check_privileged(s);
        /* Not implemented: cc 3 only.  */
        gen_op_movi_cc(s, 3);
        break;
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call */
        check_privileged(s);
        /* Not implemented: cc 3 only.  */
        gen_op_movi_cc(s, 3);
        break;
    case 0x78: /* STCKE D2(B2) [S] */
        /* Store Clock Extended */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x79: /* SACF D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->pc = s->next_pc;
        update_psw_addr(s);
        s->is_jmp = DISAS_JUMP;
        break;
    case 0x7d: /* STSI D2,(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x9d: /* LFPC D2(B2) [S] */
        /* Load the 32-bit FPC register from storage.  */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0xb1: /* STFL D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        /* Hard-coded facility word, stored at fixed address 200.  */
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0xb2: /* LPSWE D2(B2) [S] */
        /* Load PSW Extended: 16 bytes -> mask + address.  */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        break;
    case 0x20: /* SERVC R1,R2 [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        tmp = load_reg(r1);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
#endif
    default:
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
1738
/*
 * Decode B3-prefixed (RRE/RRF format) binary floating point instructions.
 * m3 carries the third field of RRF opcodes (rounding mode for the
 * convert-to-fixed helpers, or an extra register for multiply-and-add).
 * Unknown sub-opcodes raise an illegal-opcode program exception.
 */
static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
                     int r1, int r2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
/* Emit a helper call for an FP op that does not set the cc.  */
#define FP_HELPER(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

/* As above, but the helper also computes the condition code.  */
#define FP_HELPER_CC(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
    set_cc_static(s); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

    switch (op) {
    case 0x0: /* LPEBR R1,R2 [RRE] */
        FP_HELPER_CC(lpebr);
        break;
    case 0x2: /* LTEBR R1,R2 [RRE] */
        FP_HELPER_CC(ltebr);
        break;
    case 0x3: /* LCEBR R1,R2 [RRE] */
        FP_HELPER_CC(lcebr);
        break;
    case 0x4: /* LDEBR R1,R2 [RRE] */
        FP_HELPER(ldebr);
        break;
    case 0x5: /* LXDBR R1,R2 [RRE] */
        FP_HELPER(lxdbr);
        break;
    case 0x9: /* CEBR R1,R2 [RRE] */
        FP_HELPER_CC(cebr);
        break;
    case 0xa: /* AEBR R1,R2 [RRE] */
        FP_HELPER_CC(aebr);
        break;
    case 0xb: /* SEBR R1,R2 [RRE] */
        FP_HELPER_CC(sebr);
        break;
    case 0xd: /* DEBR R1,R2 [RRE] */
        FP_HELPER(debr);
        break;
    case 0x10: /* LPDBR R1,R2 [RRE] */
        FP_HELPER_CC(lpdbr);
        break;
    case 0x12: /* LTDBR R1,R2 [RRE] */
        FP_HELPER_CC(ltdbr);
        break;
    case 0x13: /* LCDBR R1,R2 [RRE] */
        FP_HELPER_CC(lcdbr);
        break;
    case 0x15: /* SQDBR R1,R2 [RRE] (comment previously misspelt "SQBDR") */
        FP_HELPER(sqdbr);
        break;
    case 0x17: /* MEEBR R1,R2 [RRE] */
        FP_HELPER(meebr);
        break;
    case 0x19: /* CDBR R1,R2 [RRE] */
        FP_HELPER_CC(cdbr);
        break;
    case 0x1a: /* ADBR R1,R2 [RRE] */
        FP_HELPER_CC(adbr);
        break;
    case 0x1b: /* SDBR R1,R2 [RRE] */
        FP_HELPER_CC(sdbr);
        break;
    case 0x1c: /* MDBR R1,R2 [RRE] */
        FP_HELPER(mdbr);
        break;
    case 0x1d: /* DDBR R1,R2 [RRE] */
        FP_HELPER(ddbr);
        break;
    case 0xe: /* MAEBR R1,R3,R2 [RRF] */
    case 0x1e: /* MADBR R1,R3,R2 [RRF] */
    case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
        /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
        tmp32_1 = tcg_const_i32(m3);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(r1);
        switch (op) {
        case 0xe:
            gen_helper_maebr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        case 0x1e:
            gen_helper_madbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        case 0x1f:
            gen_helper_msdbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x40: /* LPXBR R1,R2 [RRE] */
        FP_HELPER_CC(lpxbr);
        break;
    case 0x42: /* LTXBR R1,R2 [RRE] */
        FP_HELPER_CC(ltxbr);
        break;
    case 0x43: /* LCXBR R1,R2 [RRE] */
        FP_HELPER_CC(lcxbr);
        break;
    case 0x44: /* LEDBR R1,R2 [RRE] */
        FP_HELPER(ledbr);
        break;
    case 0x45: /* LDXBR R1,R2 [RRE] */
        FP_HELPER(ldxbr);
        break;
    case 0x46: /* LEXBR R1,R2 [RRE] */
        FP_HELPER(lexbr);
        break;
    case 0x49: /* CXBR R1,R2 [RRE] */
        FP_HELPER_CC(cxbr);
        break;
    case 0x4a: /* AXBR R1,R2 [RRE] */
        FP_HELPER_CC(axbr);
        break;
    case 0x4b: /* SXBR R1,R2 [RRE] */
        FP_HELPER_CC(sxbr);
        break;
    case 0x4c: /* MXBR R1,R2 [RRE] */
        FP_HELPER(mxbr);
        break;
    case 0x4d: /* DXBR R1,R2 [RRE] */
        FP_HELPER(dxbr);
        break;
    case 0x65: /* LXR R1,R2 [RRE] */
        /* Copy both halves of the 128-bit FP register pair.  */
        tmp = load_freg(r2);
        store_freg(r1, tmp);
        tcg_temp_free_i64(tmp);
        tmp = load_freg(r2 + 2);
        store_freg(r1 + 2, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x74: /* LZER R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzer(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x75: /* LZDR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzdr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x76: /* LZXR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzxr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x84: /* SFPC R1 [RRE] */
        /* Set the FPC register from general register r1.  */
        tmp32_1 = load_reg32(r1);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x8c: /* EFPC R1 [RRE] */
        /* Extract the FPC register into general register r1.  */
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x94: /* CEFBR R1,R2 [RRE] */
    case 0x95: /* CDFBR R1,R2 [RRE] */
    case 0x96: /* CXFBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = load_reg32(r2);
        switch (op) {
        case 0x94:
            gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
            break;
        case 0x95:
            gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
            break;
        case 0x96:
            gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x98: /* CFEBR R1,R2 [RRE] */
    case 0x99: /* CFDBR R1,R2 [RRE] */
    case 0x9a: /* CFXBR R1,R2 [RRE] */
        /* m3 is passed to the helper as the rounding mode.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        switch (op) {
        case 0x98:
            gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        case 0x99:
            gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        case 0x9a:
            gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        default:
            tcg_abort();
        }
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xa4: /* CEGBR R1,R2 [RRE] */
    case 0xa5: /* CDGBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp = load_reg(r2);
        switch (op) {
        case 0xa4:
            gen_helper_cegbr(cpu_env, tmp32_1, tmp);
            break;
        case 0xa5:
            gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0xa6: /* CXGBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp = load_reg(r2);
        gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0xa8: /* CGEBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xa9: /* CGDBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xaa: /* CGXBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    default:
        LOG_DISAS("illegal b3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }

#undef FP_HELPER_CC
#undef FP_HELPER
}
2017
2018 static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
2019 int r2)
2020 {
2021 TCGv_i64 tmp;
2022 TCGv_i32 tmp32_1;
2023
2024 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
2025 switch (op) {
2026 case 0x17: /* LLGTR R1,R2 [RRE] */
2027 tmp32_1 = load_reg32(r2);
2028 tmp = tcg_temp_new_i64();
2029 tcg_gen_andi_i32(tmp32_1, tmp32_1, 0x7fffffffUL);
2030 tcg_gen_extu_i32_i64(tmp, tmp32_1);
2031 store_reg(r1, tmp);
2032 tcg_temp_free_i32(tmp32_1);
2033 tcg_temp_free_i64(tmp);
2034 break;
2035 case 0x0f: /* LRVGR R1,R2 [RRE] */
2036 tcg_gen_bswap64_i64(regs[r1], regs[r2]);
2037 break;
2038 case 0x1f: /* LRVR R1,R2 [RRE] */
2039 tmp32_1 = load_reg32(r2);
2040 tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
2041 store_reg32(r1, tmp32_1);
2042 tcg_temp_free_i32(tmp32_1);
2043 break;
2044 case 0x83: /* FLOGR R1,R2 [RRE] */
2045 tmp = load_reg(r2);
2046 tmp32_1 = tcg_const_i32(r1);
2047 gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
2048 set_cc_static(s);
2049 tcg_temp_free_i64(tmp);
2050 tcg_temp_free_i32(tmp32_1);
2051 break;
2052 default:
2053 LOG_DISAS("illegal b9 operation 0x%x\n", op);
2054 gen_illegal_opcode(s);
2055 break;
2056 }
2057 }
2058
2059 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
2060 {
2061 TCGv_i64 tmp, tmp2;
2062 TCGv_i32 tmp32_1, tmp32_2;
2063 unsigned char opc;
2064 uint64_t insn;
2065 int op, r1, r2, r3, d1, d2, x2, b1, b2, r1b;
2066 TCGv_i32 vl;
2067
2068 opc = cpu_ldub_code(env, s->pc);
2069 LOG_DISAS("opc 0x%x\n", opc);
2070
2071 switch (opc) {
2072 #ifndef CONFIG_USER_ONLY
2073 case 0xae: /* SIGP R1,R3,D2(B2) [RS] */
2074 check_privileged(s);
2075 insn = ld_code4(env, s->pc);
2076 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2077 tmp = get_address(s, 0, b2, d2);
2078 tmp2 = load_reg(r3);
2079 tmp32_1 = tcg_const_i32(r1);
2080 potential_page_fault(s);
2081 gen_helper_sigp(cc_op, cpu_env, tmp, tmp32_1, tmp2);
2082 set_cc_static(s);
2083 tcg_temp_free_i64(tmp);
2084 tcg_temp_free_i64(tmp2);
2085 tcg_temp_free_i32(tmp32_1);
2086 break;
2087 case 0xb1: /* LRA R1,D2(X2, B2) [RX] */
2088 check_privileged(s);
2089 insn = ld_code4(env, s->pc);
2090 tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
2091 tmp32_1 = tcg_const_i32(r1);
2092 potential_page_fault(s);
2093 gen_helper_lra(cc_op, cpu_env, tmp, tmp32_1);
2094 set_cc_static(s);
2095 tcg_temp_free_i64(tmp);
2096 tcg_temp_free_i32(tmp32_1);
2097 break;
2098 #endif
2099 case 0xb2:
2100 insn = ld_code4(env, s->pc);
2101 op = (insn >> 16) & 0xff;
2102 switch (op) {
2103 case 0x9c: /* STFPC D2(B2) [S] */
2104 d2 = insn & 0xfff;
2105 b2 = (insn >> 12) & 0xf;
2106 tmp32_1 = tcg_temp_new_i32();
2107 tmp = tcg_temp_new_i64();
2108 tmp2 = get_address(s, 0, b2, d2);
2109 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
2110 tcg_gen_extu_i32_i64(tmp, tmp32_1);
2111 tcg_gen_qemu_st32(tmp, tmp2, get_mem_index(s));
2112 tcg_temp_free_i32(tmp32_1);
2113 tcg_temp_free_i64(tmp);
2114 tcg_temp_free_i64(tmp2);
2115 break;
2116 default:
2117 disas_b2(env, s, op, insn);
2118 break;
2119 }
2120 break;
2121 case 0xb3:
2122 insn = ld_code4(env, s->pc);
2123 op = (insn >> 16) & 0xff;
2124 r3 = (insn >> 12) & 0xf; /* aka m3 */
2125 r1 = (insn >> 4) & 0xf;
2126 r2 = insn & 0xf;
2127 disas_b3(env, s, op, r3, r1, r2);
2128 break;
2129 #ifndef CONFIG_USER_ONLY
2130 case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */
2131 /* Store Control */
2132 check_privileged(s);
2133 insn = ld_code4(env, s->pc);
2134 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2135 tmp = get_address(s, 0, b2, d2);
2136 tmp32_1 = tcg_const_i32(r1);
2137 tmp32_2 = tcg_const_i32(r3);
2138 potential_page_fault(s);
2139 gen_helper_stctl(cpu_env, tmp32_1, tmp, tmp32_2);
2140 tcg_temp_free_i64(tmp);
2141 tcg_temp_free_i32(tmp32_1);
2142 tcg_temp_free_i32(tmp32_2);
2143 break;
2144 case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */
2145 /* Load Control */
2146 check_privileged(s);
2147 insn = ld_code4(env, s->pc);
2148 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2149 tmp = get_address(s, 0, b2, d2);
2150 tmp32_1 = tcg_const_i32(r1);
2151 tmp32_2 = tcg_const_i32(r3);
2152 potential_page_fault(s);
2153 gen_helper_lctl(cpu_env, tmp32_1, tmp, tmp32_2);
2154 tcg_temp_free_i64(tmp);
2155 tcg_temp_free_i32(tmp32_1);
2156 tcg_temp_free_i32(tmp32_2);
2157 break;
2158 #endif
2159 case 0xb9:
2160 insn = ld_code4(env, s->pc);
2161 r1 = (insn >> 4) & 0xf;
2162 r2 = insn & 0xf;
2163 op = (insn >> 16) & 0xff;
2164 disas_b9(env, s, op, r1, r2);
2165 break;
2166 case 0xba: /* CS R1,R3,D2(B2) [RS] */
2167 insn = ld_code4(env, s->pc);
2168 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2169 tmp = get_address(s, 0, b2, d2);
2170 tmp32_1 = tcg_const_i32(r1);
2171 tmp32_2 = tcg_const_i32(r3);
2172 potential_page_fault(s);
2173 gen_helper_cs(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
2174 set_cc_static(s);
2175 tcg_temp_free_i64(tmp);
2176 tcg_temp_free_i32(tmp32_1);
2177 tcg_temp_free_i32(tmp32_2);
2178 break;
2179 case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
2180 insn = ld_code4(env, s->pc);
2181 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2182 tmp = get_address(s, 0, b2, d2);
2183 tmp32_1 = load_reg32(r1);
2184 tmp32_2 = tcg_const_i32(r3);
2185 potential_page_fault(s);
2186 gen_helper_clm(cc_op, cpu_env, tmp32_1, tmp32_2, tmp);
2187 set_cc_static(s);
2188 tcg_temp_free_i64(tmp);
2189 tcg_temp_free_i32(tmp32_1);
2190 tcg_temp_free_i32(tmp32_2);
2191 break;
2192 case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
2193 insn = ld_code4(env, s->pc);
2194 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2195 tmp = get_address(s, 0, b2, d2);
2196 tmp32_1 = load_reg32(r1);
2197 tmp32_2 = tcg_const_i32(r3);
2198 potential_page_fault(s);
2199 gen_helper_stcm(cpu_env, tmp32_1, tmp32_2, tmp);
2200 tcg_temp_free_i64(tmp);
2201 tcg_temp_free_i32(tmp32_1);
2202 tcg_temp_free_i32(tmp32_2);
2203 break;
2204 case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */
2205 case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */
2206 case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */
2207 case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */
2208 case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */
2209 case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */
2210 insn = ld_code6(env, s->pc);
2211 vl = tcg_const_i32((insn >> 32) & 0xff);
2212 b1 = (insn >> 28) & 0xf;
2213 b2 = (insn >> 12) & 0xf;
2214 d1 = (insn >> 16) & 0xfff;
2215 d2 = insn & 0xfff;
2216 tmp = get_address(s, 0, b1, d1);
2217 tmp2 = get_address(s, 0, b2, d2);
2218 switch (opc) {
2219 case 0xd4:
2220 potential_page_fault(s);
2221 gen_helper_nc(cc_op, cpu_env, vl, tmp, tmp2);
2222 set_cc_static(s);
2223 break;
2224 case 0xd5:
2225 gen_op_clc(s, (insn >> 32) & 0xff, tmp, tmp2);
2226 break;
2227 case 0xd6:
2228 potential_page_fault(s);
2229 gen_helper_oc(cc_op, cpu_env, vl, tmp, tmp2);
2230 set_cc_static(s);
2231 break;
2232 case 0xd7:
2233 potential_page_fault(s);
2234 gen_helper_xc(cc_op, cpu_env, vl, tmp, tmp2);
2235 set_cc_static(s);
2236 break;
2237 case 0xdc:
2238 potential_page_fault(s);
2239 gen_helper_tr(cpu_env, vl, tmp, tmp2);
2240 set_cc_static(s);
2241 break;
2242 case 0xf3:
2243 potential_page_fault(s);
2244 gen_helper_unpk(cpu_env, vl, tmp, tmp2);
2245 break;
2246 default:
2247 tcg_abort();
2248 }
2249 tcg_temp_free_i64(tmp);
2250 tcg_temp_free_i64(tmp2);
2251 break;
2252 #ifndef CONFIG_USER_ONLY
2253 case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */
2254 case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */
2255 check_privileged(s);
2256 potential_page_fault(s);
2257 insn = ld_code6(env, s->pc);
2258 r1 = (insn >> 36) & 0xf;
2259 r3 = (insn >> 32) & 0xf;
2260 b1 = (insn >> 28) & 0xf;
2261 d1 = (insn >> 16) & 0xfff;
2262 b2 = (insn >> 12) & 0xf;
2263 d2 = insn & 0xfff;
2264 /* XXX key in r3 */
2265 tmp = get_address(s, 0, b1, d1);
2266 tmp2 = get_address(s, 0, b2, d2);
2267 if (opc == 0xda) {
2268 gen_helper_mvcp(cc_op, cpu_env, regs[r1], tmp, tmp2);
2269 } else {
2270 gen_helper_mvcs(cc_op, cpu_env, regs[r1], tmp, tmp2);
2271 }
2272 set_cc_static(s);
2273 tcg_temp_free_i64(tmp);
2274 tcg_temp_free_i64(tmp2);
2275 break;
2276 #endif
2277 case 0xe3:
2278 insn = ld_code6(env, s->pc);
2279 debug_insn(insn);
2280 op = insn & 0xff;
2281 r1 = (insn >> 36) & 0xf;
2282 x2 = (insn >> 32) & 0xf;
2283 b2 = (insn >> 28) & 0xf;
2284 d2 = ((int)((((insn >> 16) & 0xfff)
2285 | ((insn << 4) & 0xff000)) << 12)) >> 12;
2286 disas_e3(env, s, op, r1, x2, b2, d2 );
2287 break;
2288 #ifndef CONFIG_USER_ONLY
2289 case 0xe5:
2290 /* Test Protection */
2291 check_privileged(s);
2292 insn = ld_code6(env, s->pc);
2293 debug_insn(insn);
2294 disas_e5(env, s, insn);
2295 break;
2296 #endif
2297 case 0xeb:
2298 insn = ld_code6(env, s->pc);
2299 debug_insn(insn);
2300 op = insn & 0xff;
2301 r1 = (insn >> 36) & 0xf;
2302 r3 = (insn >> 32) & 0xf;
2303 b2 = (insn >> 28) & 0xf;
2304 d2 = ((int)((((insn >> 16) & 0xfff)
2305 | ((insn << 4) & 0xff000)) << 12)) >> 12;
2306 disas_eb(env, s, op, r1, r3, b2, d2);
2307 break;
2308 case 0xed:
2309 insn = ld_code6(env, s->pc);
2310 debug_insn(insn);
2311 op = insn & 0xff;
2312 r1 = (insn >> 36) & 0xf;
2313 x2 = (insn >> 32) & 0xf;
2314 b2 = (insn >> 28) & 0xf;
2315 d2 = (short)((insn >> 16) & 0xfff);
2316 r1b = (insn >> 12) & 0xf;
2317 disas_ed(env, s, op, r1, x2, b2, d2, r1b);
2318 break;
2319 default:
2320 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
2321 gen_illegal_opcode(s);
2322 break;
2323 }
2324 }
2325
2326 /* ====================================================================== */
2327 /* Define the insn format enumeration. */
2328 #define F0(N) FMT_##N,
2329 #define F1(N, X1) F0(N)
2330 #define F2(N, X1, X2) F0(N)
2331 #define F3(N, X1, X2, X3) F0(N)
2332 #define F4(N, X1, X2, X3, X4) F0(N)
2333 #define F5(N, X1, X2, X3, X4, X5) F0(N)
2334
2335 typedef enum {
2336 #include "insn-format.def"
2337 } DisasFormat;
2338
2339 #undef F0
2340 #undef F1
2341 #undef F2
2342 #undef F3
2343 #undef F4
2344 #undef F5
2345
2346 /* Define a structure to hold the decoded fields. We'll store each inside
2347 an array indexed by an enum. In order to conserve memory, we'll arrange
2348 for fields that do not exist at the same time to overlap, thus the "C"
2349 for compact. For checking purposes there is an "O" for original index
2350 as well that will be applied to availability bitmaps. */
2351
2352 enum DisasFieldIndexO {
2353 FLD_O_r1,
2354 FLD_O_r2,
2355 FLD_O_r3,
2356 FLD_O_m1,
2357 FLD_O_m3,
2358 FLD_O_m4,
2359 FLD_O_b1,
2360 FLD_O_b2,
2361 FLD_O_b4,
2362 FLD_O_d1,
2363 FLD_O_d2,
2364 FLD_O_d4,
2365 FLD_O_x2,
2366 FLD_O_l1,
2367 FLD_O_l2,
2368 FLD_O_i1,
2369 FLD_O_i2,
2370 FLD_O_i3,
2371 FLD_O_i4,
2372 FLD_O_i5
2373 };
2374
2375 enum DisasFieldIndexC {
2376 FLD_C_r1 = 0,
2377 FLD_C_m1 = 0,
2378 FLD_C_b1 = 0,
2379 FLD_C_i1 = 0,
2380
2381 FLD_C_r2 = 1,
2382 FLD_C_b2 = 1,
2383 FLD_C_i2 = 1,
2384
2385 FLD_C_r3 = 2,
2386 FLD_C_m3 = 2,
2387 FLD_C_i3 = 2,
2388
2389 FLD_C_m4 = 3,
2390 FLD_C_b4 = 3,
2391 FLD_C_i4 = 3,
2392 FLD_C_l1 = 3,
2393
2394 FLD_C_i5 = 4,
2395 FLD_C_d1 = 4,
2396
2397 FLD_C_d2 = 5,
2398
2399 FLD_C_d4 = 6,
2400 FLD_C_x2 = 6,
2401 FLD_C_l2 = 6,
2402
2403 NUM_C_FIELD = 7
2404 };
2405
2406 struct DisasFields {
2407 unsigned op:8;
2408 unsigned op2:8;
2409 unsigned presentC:16;
2410 unsigned int presentO;
2411 int c[NUM_C_FIELD];
2412 };
2413
2414 /* This is the way fields are to be accessed out of DisasFields. */
2415 #define have_field(S, F) have_field1((S), FLD_O_##F)
2416 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
2417
2418 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
2419 {
2420 return (f->presentO >> c) & 1;
2421 }
2422
/* Fetch the value of decoded field O out of its compact storage slot C.
   The assert verifies that the field was actually decoded for this insn
   (i.e. the matching presentO bit is set), catching insn-table/format
   mismatches at translation time.  */
2423 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
2424 enum DisasFieldIndexC c)
2425 {
2426 assert(have_field1(f, o));
2427 return f->c[c];
2428 }
2429
2430 /* Describe the layout of each field in each format. */
2431 typedef struct DisasField {
2432 unsigned int beg:8;
2433 unsigned int size:8;
2434 unsigned int type:2;
2435 unsigned int indexC:6;
2436 enum DisasFieldIndexO indexO:8;
2437 } DisasField;
2438
2439 typedef struct DisasFormatInfo {
2440 DisasField op[NUM_C_FIELD];
2441 } DisasFormatInfo;
2442
2443 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
2444 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
2445 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2446 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
2447 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2448 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2449 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
2450 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2451 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2452 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2453 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2454 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2455 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
2456 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
2457
2458 #define F0(N) { { } },
2459 #define F1(N, X1) { { X1 } },
2460 #define F2(N, X1, X2) { { X1, X2 } },
2461 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
2462 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
2463 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
2464
2465 static const DisasFormatInfo format_info[] = {
2466 #include "insn-format.def"
2467 };
2468
2469 #undef F0
2470 #undef F1
2471 #undef F2
2472 #undef F3
2473 #undef F4
2474 #undef F5
2475 #undef R
2476 #undef M
2477 #undef BD
2478 #undef BXD
2479 #undef BDL
2480 #undef BXDL
2481 #undef I
2482 #undef L
2483
2484 /* Generally, we'll extract operands into this structures, operate upon
2485 them, and store them back. See the "in1", "in2", "prep", "wout" sets
2486 of routines below for more details. */
2487 typedef struct {
2488 bool g_out, g_out2, g_in1, g_in2;
2489 TCGv_i64 out, out2, in1, in2;
2490 TCGv_i64 addr1;
2491 } DisasOps;
2492
2493 /* Return values from translate_one, indicating the state of the TB. */
2494 typedef enum {
2495 /* Continue the TB. */
2496 NO_EXIT,
2497 /* We have emitted one or more goto_tb. No fixup required. */
2498 EXIT_GOTO_TB,
2499 /* We are not using a goto_tb (for whatever reason), but have updated
2500 the PC (for whatever reason), so there's no need to do it again on
2501 exiting the TB. */
2502 EXIT_PC_UPDATED,
2503 /* We are exiting the TB, but have neither emitted a goto_tb, nor
2504 updated the PC for the next instruction to be executed. */
2505 EXIT_PC_STALE,
2506 /* We are ending the TB with a noreturn function call, e.g. longjmp.
2507 No following code will be executed. */
2508 EXIT_NORETURN,
2509 } ExitStatus;
2510
2511 typedef enum DisasFacility {
2512 FAC_Z, /* zarch (default) */
2513 FAC_CASS, /* compare and swap and store */
2514 FAC_CASS2, /* compare and swap and store 2*/
2515 FAC_DFP, /* decimal floating point */
2516 FAC_DFPR, /* decimal floating point rounding */
2517 FAC_DO, /* distinct operands */
2518 FAC_EE, /* execute extensions */
2519 FAC_EI, /* extended immediate */
2520 FAC_FPE, /* floating point extension */
2521 FAC_FPSSH, /* floating point support sign handling */
2522 FAC_FPRGR, /* FPR-GR transfer */
2523 FAC_GIE, /* general instructions extension */
2524 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
2525 FAC_HW, /* high-word */
2526 FAC_IEEEE_SIM, /* IEEE exception sumilation */
2527 FAC_LOC, /* load/store on condition */
2528 FAC_LD, /* long displacement */
2529 FAC_PC, /* population count */
2530 FAC_SCF, /* store clock fast */
2531 FAC_SFLE, /* store facility list extended */
2532 } DisasFacility;
2533
2534 struct DisasInsn {
2535 unsigned opc:16;
2536 DisasFormat fmt:6;
2537 DisasFacility fac:6;
2538
2539 const char *name;
2540
2541 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
2542 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
2543 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
2544 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
2545 void (*help_cout)(DisasContext *, DisasOps *);
2546 ExitStatus (*help_op)(DisasContext *, DisasOps *);
2547
2548 uint64_t data;
2549 };
2550
2551 /* ====================================================================== */
2552 /* Miscelaneous helpers, used by several operations. */
2553
/* Compute a shift-count operand into o->in2.  MASK selects the bits of
   the count that are architecturally significant (callers pass e.g. 31
   or 63 depending on operand width — confirm at call sites).  With no
   base register the displacement alone is the (constant) count; with a
   base register the count comes from the effective address, masked.  */
2554 static void help_l2_shift(DisasContext *s, DisasFields *f,
2555 DisasOps *o, int mask)
2556 {
2557 int b2 = get_field(f, b2);
2558 int d2 = get_field(f, d2);
2559
2560 if (b2 == 0) {
2561 o->in2 = tcg_const_i64(d2 & mask);
2562 } else {
2563 o->in2 = get_address(s, 0, b2, d2);
2564 tcg_gen_andi_i64(o->in2, o->in2, mask);
2565 }
2566 }
2567
/* Emit an unconditional direct branch to DEST.  A branch to the
   fallthrough address needs no code at all; a destination reachable via
   goto_tb gets a chained TB exit (with the CC state flushed first);
   otherwise we just update psw_addr and leave the TB.  */
2568 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
2569 {
2570 if (dest == s->next_pc) {
2571 return NO_EXIT;
2572 }
2573 if (use_goto_tb(s, dest)) {
2574 gen_update_cc_op(s);
2575 tcg_gen_goto_tb(0);
2576 tcg_gen_movi_i64(psw_addr, dest);
2577 tcg_gen_exit_tb((tcg_target_long)s->tb);
2578 return EXIT_GOTO_TB;
2579 } else {
2580 tcg_gen_movi_i64(psw_addr, dest);
2581 return EXIT_PC_UPDATED;
2582 }
2583 }
2584
/* Emit code for a conditional branch described by C.  The destination is
   either PC-relative (IS_IMM: s->pc + 2 * IMM halfwords) or the register
   value in CDEST.  Degenerate cases (condition never/always, branch to
   next, bcr %r0) are folded away at translation time.  Otherwise one of
   three code shapes is chosen, depending on what use_goto_tb permits:
   a two-sided goto_tb chain, a one-sided chain (fallthru only), or a
   chain-free movcond-based PC update.  C is freed on every path.  */
2585 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
2586 bool is_imm, int imm, TCGv_i64 cdest)
2587 {
2588 ExitStatus ret;
2589 uint64_t dest = s->pc + 2 * imm;
2590 int lab;
2591
2592 /* Take care of the special cases first. */
2593 if (c->cond == TCG_COND_NEVER) {
2594 ret = NO_EXIT;
2595 goto egress;
2596 }
2597 if (is_imm) {
2598 if (dest == s->next_pc) {
2599 /* Branch to next. */
2600 ret = NO_EXIT;
2601 goto egress;
2602 }
2603 if (c->cond == TCG_COND_ALWAYS) {
2604 ret = help_goto_direct(s, dest);
2605 goto egress;
2606 }
2607 } else {
2608 if (TCGV_IS_UNUSED_I64(cdest)) {
2609 /* E.g. bcr %r0 -> no branch. */
2610 ret = NO_EXIT;
2611 goto egress;
2612 }
2613 if (c->cond == TCG_COND_ALWAYS) {
2614 tcg_gen_mov_i64(psw_addr, cdest);
2615 ret = EXIT_PC_UPDATED;
2616 goto egress;
2617 }
2618 }
2619
2620 if (use_goto_tb(s, s->next_pc)) {
2621 if (is_imm && use_goto_tb(s, dest)) {
2622 /* Both exits can use goto_tb. */
2623 gen_update_cc_op(s);
2624
2625 lab = gen_new_label();
2626 if (c->is_64) {
2627 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
2628 } else {
2629 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
2630 }
2631
2632 /* Branch not taken. */
2633 tcg_gen_goto_tb(0);
2634 tcg_gen_movi_i64(psw_addr, s->next_pc);
2635 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
2636
2637 /* Branch taken. */
2638 gen_set_label(lab);
2639 tcg_gen_goto_tb(1);
2640 tcg_gen_movi_i64(psw_addr, dest);
2641 tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
2642
2643 ret = EXIT_GOTO_TB;
2644 } else {
2645 /* Fallthru can use goto_tb, but taken branch cannot. */
2646 /* Store taken branch destination before the brcond. This
2647 avoids having to allocate a new local temp to hold it.
2648 We'll overwrite this in the not taken case anyway. */
2649 if (!is_imm) {
2650 tcg_gen_mov_i64(psw_addr, cdest);
2651 }
2652
2653 lab = gen_new_label();
2654 if (c->is_64) {
2655 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
2656 } else {
2657 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
2658 }
2659
2660 /* Branch not taken. */
2661 gen_update_cc_op(s);
2662 tcg_gen_goto_tb(0);
2663 tcg_gen_movi_i64(psw_addr, s->next_pc);
2664 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
2665
2666 gen_set_label(lab);
2667 if (is_imm) {
2668 tcg_gen_movi_i64(psw_addr, dest);
2669 }
2670 ret = EXIT_PC_UPDATED;
2671 }
2672 } else {
2673 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
2674 Most commonly we're single-stepping or some other condition that
2675 disables all use of goto_tb. Just update the PC and exit. */
2676
2677 TCGv_i64 next = tcg_const_i64(s->next_pc);
2678 if (is_imm) {
2679 cdest = tcg_const_i64(dest);
2680 }
2681
2682 if (c->is_64) {
2683 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
2684 cdest, next);
2685 } else {
/* 32-bit comparison: widen the setcond result to 64 bits so a
   single 64-bit movcond can select between taken/fallthru PC.  */
2686 TCGv_i32 t0 = tcg_temp_new_i32();
2687 TCGv_i64 t1 = tcg_temp_new_i64();
2688 TCGv_i64 z = tcg_const_i64(0);
2689 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
2690 tcg_gen_extu_i32_i64(t1, t0);
2691 tcg_temp_free_i32(t0);
2692 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
2693 tcg_temp_free_i64(t1);
2694 tcg_temp_free_i64(z);
2695 }
2696
2697 if (is_imm) {
2698 tcg_temp_free_i64(cdest);
2699 }
2700 tcg_temp_free_i64(next);
2701
2702 ret = EXIT_PC_UPDATED;
2703 }
2704
2705 egress:
2706 free_compare(c);
2707 return ret;
2708 }
2709
2710 /* ====================================================================== */
2711 /* The operations. These perform the bulk of the work for any insn,
2712 usually after the operands have been loaded and output initialized. */
2713
2714 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
2715 {
2716 gen_helper_abs_i64(o->out, o->in2);
2717 return NO_EXIT;
2718 }
2719
2720 static ExitStatus op_add(DisasContext *s, DisasOps *o)
2721 {
2722 tcg_gen_add_i64(o->out, o->in1, o->in2);
2723 return NO_EXIT;
2724 }
2725
/* ADD LOGICAL WITH CARRY: out = in1 + in2 + carry-in.  */
2726 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
2727 {
2728 TCGv_i64 cc;
2729
2730 tcg_gen_add_i64(o->out, o->in1, o->in2);
2731
2732 /* XXX possible optimization point */
/* Materialize the current CC; per the z/Architecture logical-add CC
   encoding, the carry is bit 1 of the CC value (cc 2/3 = carry), so
   cc >> 1 yields the carry-in to add.  */
2733 gen_op_calc_cc(s);
2734 cc = tcg_temp_new_i64();
2735 tcg_gen_extu_i32_i64(cc, cc_op);
2736 tcg_gen_shri_i64(cc, cc, 1);
2737
2738 tcg_gen_add_i64(o->out, o->out, cc);
2739 tcg_temp_free_i64(cc);
2740 return NO_EXIT;
2741 }
2742
2743 static ExitStatus op_and(DisasContext *s, DisasOps *o)
2744 {
2745 tcg_gen_and_i64(o->out, o->in1, o->in2);
2746 return NO_EXIT;
2747 }
2748
/* AND with an immediate sub-field of the register (NIxx family).
   s->insn->data packs the field position: low byte = bit shift of the
   field, high bits = field width.  The immediate arrives in o->in2;
   it is shifted into position and all bits outside the field are set
   to 1, so the AND leaves those register bits unchanged.  */
2749 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
2750 {
2751 int shift = s->insn->data & 0xff;
2752 int size = s->insn->data >> 8;
2753 uint64_t mask = ((1ull << size) - 1) << shift;
2754
2755 assert(!o->g_in2);
2756 tcg_gen_shli_i64(o->in2, o->in2, shift);
2757 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2758 tcg_gen_and_i64(o->out, o->in1, o->in2);
2759
2760 /* Produce the CC from only the bits manipulated. */
2761 tcg_gen_andi_i64(cc_dst, o->out, mask);
2762 set_cc_nz_u64(s, cc_dst);
2763 return NO_EXIT;
2764 }
2765
2766 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
2767 {
2768 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
2769 if (!TCGV_IS_UNUSED_I64(o->in2)) {
2770 tcg_gen_mov_i64(psw_addr, o->in2);
2771 return EXIT_PC_UPDATED;
2772 } else {
2773 return NO_EXIT;
2774 }
2775 }
2776
2777 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
2778 {
2779 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
2780 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
2781 }
2782
2783 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
2784 {
2785 int m1 = get_field(s->fields, m1);
2786 bool is_imm = have_field(s->fields, i2);
2787 int imm = is_imm ? get_field(s->fields, i2) : 0;
2788 DisasCompare c;
2789
2790 disas_jcc(s, &c, m1);
2791 return help_branch(s, &c, is_imm, imm, o->in2);
2792 }
2793
2794 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
2795 {
2796 int r1 = get_field(s->fields, r1);
2797 bool is_imm = have_field(s->fields, i2);
2798 int imm = is_imm ? get_field(s->fields, i2) : 0;
2799 DisasCompare c;
2800 TCGv_i64 t;
2801
2802 c.cond = TCG_COND_NE;
2803 c.is_64 = false;
2804 c.g1 = false;
2805 c.g2 = false;
2806
2807 t = tcg_temp_new_i64();
2808 tcg_gen_subi_i64(t, regs[r1], 1);
2809 store_reg32_i64(r1, t);
2810 c.u.s32.a = tcg_temp_new_i32();
2811 c.u.s32.b = tcg_const_i32(0);
2812 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
2813 tcg_temp_free_i64(t);
2814
2815 return help_branch(s, &c, is_imm, imm, o->in2);
2816 }
2817
2818 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
2819 {
2820 int r1 = get_field(s->fields, r1);
2821 bool is_imm = have_field(s->fields, i2);
2822 int imm = is_imm ? get_field(s->fields, i2) : 0;
2823 DisasCompare c;
2824
2825 c.cond = TCG_COND_NE;
2826 c.is_64 = true;
2827 c.g1 = true;
2828 c.g2 = false;
2829
2830 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
2831 c.u.s64.a = regs[r1];
2832 c.u.s64.b = tcg_const_i64(0);
2833
2834 return help_branch(s, &c, is_imm, imm, o->in2);
2835 }
2836
2837 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
2838 {
2839 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2840 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2841 potential_page_fault(s);
2842 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
2843 tcg_temp_free_i32(r1);
2844 tcg_temp_free_i32(r3);
2845 set_cc_static(s);
2846 return NO_EXIT;
2847 }
2848
2849 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2850 {
2851 TCGv_i64 t1 = tcg_temp_new_i64();
2852 TCGv_i32 t2 = tcg_temp_new_i32();
2853 tcg_gen_trunc_i64_i32(t2, o->in1);
2854 gen_helper_cvd(t1, t2);
2855 tcg_temp_free_i32(t2);
2856 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2857 tcg_temp_free_i64(t1);
2858 return NO_EXIT;
2859 }
2860
2861 #ifndef CONFIG_USER_ONLY
2862 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2863 {
2864 TCGv_i32 tmp;
2865
2866 check_privileged(s);
2867 potential_page_fault(s);
2868
2869 /* We pretend the format is RX_a so that D2 is the field we want. */
2870 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
2871 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
2872 tcg_temp_free_i32(tmp);
2873 return NO_EXIT;
2874 }
2875 #endif
2876
2877 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2878 {
2879 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2880 return_low128(o->out);
2881 return NO_EXIT;
2882 }
2883
2884 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2885 {
2886 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2887 return_low128(o->out);
2888 return NO_EXIT;
2889 }
2890
2891 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2892 {
2893 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2894 return_low128(o->out);
2895 return NO_EXIT;
2896 }
2897
2898 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2899 {
2900 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2901 return_low128(o->out);
2902 return NO_EXIT;
2903 }
2904
2905 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2906 {
2907 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2908 tb->flags, (ab)use the tb->cs_base field as the address of
2909 the template in memory, and grab 8 bits of tb->flags/cflags for
2910 the contents of the register. We would then recognize all this
2911 in gen_intermediate_code_internal, generating code for exactly
2912 one instruction. This new TB then gets executed normally.
2913
2914 On the other hand, this seems to be mostly used for modifying
2915 MVC inside of memcpy, which needs a helper call anyway. So
2916 perhaps this doesn't bear thinking about any further. */
2917
2918 TCGv_i64 tmp;
2919
2920 update_psw_addr(s);
2921 gen_op_calc_cc(s);
2922
2923 tmp = tcg_const_i64(s->next_pc);
2924 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2925 tcg_temp_free_i64(tmp);
2926
2927 set_cc_static(s);
2928 return NO_EXIT;
2929 }
2930
2931 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2932 {
2933 int m3 = get_field(s->fields, m3);
2934 int pos, len, base = s->insn->data;
2935 TCGv_i64 tmp = tcg_temp_new_i64();
2936 uint64_t ccm;
2937
2938 switch (m3) {
2939 case 0xf:
2940 /* Effectively a 32-bit load. */
2941 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2942 len = 32;
2943 goto one_insert;
2944
2945 case 0xc:
2946 case 0x6:
2947 case 0x3:
2948 /* Effectively a 16-bit load. */
2949 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2950 len = 16;
2951 goto one_insert;
2952
2953 case 0x8:
2954 case 0x4:
2955 case 0x2:
2956 case 0x1:
2957 /* Effectively an 8-bit load. */
2958 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2959 len = 8;
2960 goto one_insert;
2961
2962 one_insert:
2963 pos = base + ctz32(m3) * 8;
2964 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2965 ccm = ((1ull << len) - 1) << pos;
2966 break;
2967
2968 default:
2969 /* This is going to be a sequence of loads and inserts. */
2970 pos = base + 32 - 8;
2971 ccm = 0;
2972 while (m3) {
2973 if (m3 & 0x8) {
2974 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2975 tcg_gen_addi_i64(o->in2, o->in2, 1);
2976 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2977 ccm |= 0xff << pos;
2978 }
2979 m3 = (m3 << 1) & 0xf;
2980 pos -= 8;
2981 }
2982 break;
2983 }
2984
2985 tcg_gen_movi_i64(tmp, ccm);
2986 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2987 tcg_temp_free_i64(tmp);
2988 return NO_EXIT;
2989 }
2990
2991 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2992 {
2993 int shift = s->insn->data & 0xff;
2994 int size = s->insn->data >> 8;
2995 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2996 return NO_EXIT;
2997 }
2998
2999 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
3000 {
3001 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
3002 return NO_EXIT;
3003 }
3004
3005 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
3006 {
3007 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
3008 return NO_EXIT;
3009 }
3010
3011 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
3012 {
3013 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
3014 return NO_EXIT;
3015 }
3016
3017 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
3018 {
3019 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
3020 return NO_EXIT;
3021 }
3022
3023 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
3024 {
3025 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
3026 return NO_EXIT;
3027 }
3028
3029 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
3030 {
3031 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
3032 return NO_EXIT;
3033 }
3034
3035 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
3036 {
3037 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
3038 return NO_EXIT;
3039 }
3040
3041 #ifndef CONFIG_USER_ONLY
3042 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
3043 {
3044 TCGv_i64 t1, t2;
3045
3046 check_privileged(s);
3047
3048 t1 = tcg_temp_new_i64();
3049 t2 = tcg_temp_new_i64();
3050 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3051 tcg_gen_addi_i64(o->in2, o->in2, 4);
3052 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3053 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3054 tcg_gen_shli_i64(t1, t1, 32);
3055 gen_helper_load_psw(cpu_env, t1, t2);
3056 tcg_temp_free_i64(t1);
3057 tcg_temp_free_i64(t2);
3058 return EXIT_NORETURN;
3059 }
3060 #endif
3061
3062 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
3063 {
3064 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3065 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3066 potential_page_fault(s);
3067 gen_helper_lam(cpu_env, r1, o->in2, r3);
3068 tcg_temp_free_i32(r1);
3069 tcg_temp_free_i32(r3);
3070 return NO_EXIT;
3071 }
3072
3073 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
3074 {
3075 int r1 = get_field(s->fields, r1);
3076 int r3 = get_field(s->fields, r3);
3077 TCGv_i64 t = tcg_temp_new_i64();
3078 TCGv_i64 t4 = tcg_const_i64(4);
3079
3080 while (1) {
3081 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
3082 store_reg32_i64(r1, t);
3083 if (r1 == r3) {
3084 break;
3085 }
3086 tcg_gen_add_i64(o->in2, o->in2, t4);
3087 r1 = (r1 + 1) & 15;
3088 }
3089
3090 tcg_temp_free_i64(t);
3091 tcg_temp_free_i64(t4);
3092 return NO_EXIT;
3093 }
3094
3095 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
3096 {
3097 int r1 = get_field(s->fields, r1);
3098 int r3 = get_field(s->fields, r3);
3099 TCGv_i64 t = tcg_temp_new_i64();
3100 TCGv_i64 t4 = tcg_const_i64(4);
3101
3102 while (1) {
3103 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
3104 store_reg32h_i64(r1, t);
3105 if (r1 == r3) {
3106 break;
3107 }
3108 tcg_gen_add_i64(o->in2, o->in2, t4);
3109 r1 = (r1 + 1) & 15;
3110 }
3111
3112 tcg_temp_free_i64(t);
3113 tcg_temp_free_i64(t4);
3114 return NO_EXIT;
3115 }
3116
3117 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
3118 {
3119 int r1 = get_field(s->fields, r1);
3120 int r3 = get_field(s->fields, r3);
3121 TCGv_i64 t8 = tcg_const_i64(8);
3122
3123 while (1) {
3124 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3125 if (r1 == r3) {
3126 break;
3127 }
3128 tcg_gen_add_i64(o->in2, o->in2, t8);
3129 r1 = (r1 + 1) & 15;
3130 }
3131
3132 tcg_temp_free_i64(t8);
3133 return NO_EXIT;
3134 }
3135
3136 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
3137 {
3138 o->out = o->in2;
3139 o->g_out = o->g_in2;
3140 TCGV_UNUSED_I64(o->in2);
3141 o->g_in2 = false;
3142 return NO_EXIT;
3143 }
3144
3145 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
3146 {
3147 o->out = o->in1;
3148 o->out2 = o->in2;
3149 o->g_out = o->g_in1;
3150 o->g_out2 = o->g_in2;
3151 TCGV_UNUSED_I64(o->in1);
3152 TCGV_UNUSED_I64(o->in2);
3153 o->g_in1 = o->g_in2 = false;
3154 return NO_EXIT;
3155 }
3156
3157 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
3158 {
3159 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3160 potential_page_fault(s);
3161 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3162 tcg_temp_free_i32(l);
3163 return NO_EXIT;
3164 }
3165
3166 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
3167 {
3168 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3169 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
3170 potential_page_fault(s);
3171 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
3172 tcg_temp_free_i32(r1);
3173 tcg_temp_free_i32(r2);
3174 set_cc_static(s);
3175 return NO_EXIT;
3176 }
3177
3178 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3179 {
3180 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3181 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3182 potential_page_fault(s);
3183 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
3184 tcg_temp_free_i32(r1);
3185 tcg_temp_free_i32(r3);
3186 set_cc_static(s);
3187 return NO_EXIT;
3188 }
3189
3190 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3191 {
3192 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3193 return NO_EXIT;
3194 }
3195
3196 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3197 {
3198 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
3199 return_low128(o->out2);
3200 return NO_EXIT;
3201 }
3202
3203 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3204 {
3205 gen_helper_nabs_i64(o->out, o->in2);
3206 return NO_EXIT;
3207 }
3208
3209 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3210 {
3211 tcg_gen_neg_i64(o->out, o->in2);
3212 return NO_EXIT;
3213 }
3214
3215 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3216 {
3217 tcg_gen_or_i64(o->out, o->in1, o->in2);
3218 return NO_EXIT;
3219 }
3220
3221 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3222 {
3223 int shift = s->insn->data & 0xff;
3224 int size = s->insn->data >> 8;
3225 uint64_t mask = ((1ull << size) - 1) << shift;
3226
3227 assert(!o->g_in2);
3228 tcg_gen_shli_i64(o->in2, o->in2, shift);
3229 tcg_gen_or_i64(o->out, o->in1, o->in2);
3230
3231 /* Produce the CC from only the bits manipulated. */
3232 tcg_gen_andi_i64(cc_dst, o->out, mask);
3233 set_cc_nz_u64(s, cc_dst);
3234 return NO_EXIT;
3235 }
3236
3237 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3238 {
3239 TCGv_i32 t1 = tcg_temp_new_i32();
3240 TCGv_i32 t2 = tcg_temp_new_i32();
3241 TCGv_i32 to = tcg_temp_new_i32();
3242 tcg_gen_trunc_i64_i32(t1, o->in1);
3243 tcg_gen_trunc_i64_i32(t2, o->in2);
3244 tcg_gen_rotl_i32(to, t1, t2);
3245 tcg_gen_extu_i32_i64(o->out, to);
3246 tcg_temp_free_i32(t1);
3247 tcg_temp_free_i32(t2);
3248 tcg_temp_free_i32(to);
3249 return NO_EXIT;
3250 }
3251
3252 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3253 {
3254 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3255 return NO_EXIT;
3256 }
3257
/* SHIFT LEFT SINGLE (arithmetic).  s->insn->data is the index of the
   sign bit (31 for the 32-bit form, 63 for the 64-bit form), which also
   selects the CC computation.  */
3258 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3259 {
3260 uint64_t sign = 1ull << s->insn->data;
3261 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3262 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3263 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3264 /* The arithmetic left shift is curious in that it does not affect
3265 the sign bit. Copy that over from the source unchanged. */
3266 tcg_gen_andi_i64(o->out, o->out, ~sign);
3267 tcg_gen_andi_i64(o->in1, o->in1, sign);
3268 tcg_gen_or_i64(o->out, o->out, o->in1);
3269 return NO_EXIT;
3270 }
3271
3272 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3273 {
3274 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3275 return NO_EXIT;
3276 }
3277
3278 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3279 {
3280 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3281 return NO_EXIT;
3282 }
3283
3284 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3285 {
3286 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3287 return NO_EXIT;
3288 }
3289
3290 #ifndef CONFIG_USER_ONLY
3291 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3292 {
3293 check_privileged(s);
3294 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3295 return NO_EXIT;
3296 }
3297
3298 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3299 {
3300 uint64_t i2 = get_field(s->fields, i2);
3301 TCGv_i64 t;
3302
3303 check_privileged(s);
3304
3305 /* It is important to do what the instruction name says: STORE THEN.
3306 If we let the output hook perform the store then if we fault and
3307 restart, we'll have the wrong SYSTEM MASK in place. */
3308 t = tcg_temp_new_i64();
3309 tcg_gen_shri_i64(t, psw_mask, 56);
3310 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3311 tcg_temp_free_i64(t);
3312
3313 if (s->fields->op == 0xac) {
3314 tcg_gen_andi_i64(psw_mask, psw_mask,
3315 (i2 << 56) | 0x00ffffffffffffffull);
3316 } else {
3317 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3318 }
3319 return NO_EXIT;
3320 }
3321 #endif
3322
3323 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3324 {
3325 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3326 return NO_EXIT;
3327 }
3328
3329 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3330 {
3331 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3332 return NO_EXIT;
3333 }
3334
3335 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3336 {
3337 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3338 return NO_EXIT;
3339 }
3340
3341 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3342 {
3343 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3344 return NO_EXIT;
3345 }
3346
3347 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3348 {
3349 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3350 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3351 potential_page_fault(s);
3352 gen_helper_stam(cpu_env, r1, o->in2, r3);
3353 tcg_temp_free_i32(r1);
3354 tcg_temp_free_i32(r3);
3355 return NO_EXIT;
3356 }
3357
3358 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3359 {
3360 int r1 = get_field(s->fields, r1);
3361 int r3 = get_field(s->fields, r3);
3362 int size = s->insn->data;
3363 TCGv_i64 tsize = tcg_const_i64(size);
3364
3365 while (1) {
3366 if (size == 8) {
3367 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3368 } else {
3369 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3370 }
3371 if (r1 == r3) {
3372 break;
3373 }
3374 tcg_gen_add_i64(o->in2, o->in2, tsize);
3375 r1 = (r1 + 1) & 15;
3376 }
3377
3378 tcg_temp_free_i64(tsize);
3379 return NO_EXIT;
3380 }
3381
3382 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3383 {
3384 int r1 = get_field(s->fields, r1);
3385 int r3 = get_field(s->fields, r3);
3386 TCGv_i64 t = tcg_temp_new_i64();
3387 TCGv_i64 t4 = tcg_const_i64(4);
3388 TCGv_i64 t32 = tcg_const_i64(32);
3389
3390 while (1) {
3391 tcg_gen_shl_i64(t, regs[r1], t32);
3392 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3393 if (r1 == r3) {
3394 break;
3395 }
3396 tcg_gen_add_i64(o->in2, o->in2, t4);
3397 r1 = (r1 + 1) & 15;
3398 }
3399
3400 tcg_temp_free_i64(t);
3401 tcg_temp_free_i64(t4);
3402 tcg_temp_free_i64(t32);
3403 return NO_EXIT;
3404 }
3405
/* OUT = IN1 - IN2; the cc, when needed, is set by the cout helper.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3411
/* Subtract with borrow: computed as OUT = IN1 + ~IN2 + carry, where the
   carry is extracted as bit 1 of the current condition code.  Note that
   IN2 is destroyed (complemented in place), hence the !g_in2 assert.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* The carry is bit 1 of the cc (cc values 2 and 3).  */
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3429
/* SUPERVISOR CALL: record the svc interruption code and instruction
   length in the CPU state, then raise EXCP_SVC.  The psw address and cc
   must be synced first since the exception leaves the TB.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    /* Instruction length = distance to the next insn.  */
    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3448
/* OUT = IN1 ^ IN2; the cc, when needed, is set by the cout helper.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3454
/* XOR immediate into one field of IN1.  The insn data field encodes
   (size << 8) | shift, i.e. which aligned sub-field is affected.  IN2
   holds the immediate and is shifted into position in place.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3470
3471 /* ====================================================================== */
3472 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3473 the original inputs), update the various cc data structures in order to
3474 be able to compute the new condition code. */
3475
/* cc from the magnitude of the result (LOAD POSITIVE).  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* cc from signed addition: operands and result are all recorded so the
   lazy cc evaluator can detect overflow.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* cc from unsigned (logical) addition.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* cc from add-with-carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* cc from signed comparison of the two inputs.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* cc from unsigned comparison of the two inputs.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* cc from the negated magnitude (LOAD NEGATIVE).  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* cc from two's complement (LOAD COMPLEMENT).  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* cc = (result != 0).  For the 32-bit form only the low 32 bits count,
   so zero-extend into cc_dst before recording.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* cc from the sign of the result (compare against zero).  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* cc from signed subtraction.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* cc from unsigned (logical) subtraction.  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* cc from subtract-with-borrow.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* cc from TEST UNDER MASK of the two inputs.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3616
3617 /* ====================================================================== */
3618 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3619 with the TCG register to which we will write. Used in combination with
3620 the "wout" generators, in some cases we need a new temporary, and in
3621 some cases we can write to a TCG global. */
3622
/* Allocate a fresh temporary for the output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

/* Allocate a fresh temporary pair for a double-width output.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}

/* Write directly into the r1 global; g_out prevents it being freed.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}

/* Write directly into the even/odd register pair r1, r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}
3648
3649 /* ====================================================================== */
3650 /* The "Write OUTput" generators. These generally perform some non-trivial
3651 copy of data to TCG globals, or to main memory. The trivial cases are
3652 generally handled by having a "prep" generator install the TCG global
3653 as the destination of the operation. */
3654
/* Write all 64 bits of OUT to register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

/* Insert the low 8 bits of OUT into register r1, preserving the rest.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

/* Write the low 32 bits of OUT to register r1, high half preserved.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}

/* Write the 32-bit pair OUT/OUT2 to registers r1 and r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

/* Split the 64-bit OUT across the pair: low half to r1+1, high half to
   r1.  Note OUT is clobbered by the in-place shift.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

/* Write OUT as a 32-bit (short) float to register f1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}

/* Write OUT as a 64-bit (long) float to register f1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}

/* Write the 128-bit (extended) float pair OUT/OUT2 to f1 and f1+2.  */
static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg((f1 + 2) & 15, o->out2);
}

/* Conditional write: skip the store when r1 == r2 (the operation was a
   no-op move onto itself).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}

/* Store OUT (8/16/32/64 bits) to the address computed into addr1.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
3738
3739 /* ====================================================================== */
3740 /* The "INput 1" generators. These load the first operand to an insn. */
3741
/* IN1 = a fresh copy of register r1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

/* IN1 = the r1 global itself; g_in1 prevents it being freed.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

/* IN1 = register r1, sign-extended from 32 bits.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

/* IN1 = register r1, zero-extended from 32 bits.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

/* IN1 = the odd register of the r1 pair.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* IN1 = the odd register of the r1 pair, sign-extended from 32 bits.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* IN1 = the odd register of the r1 pair, zero-extended from 32 bits.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}
3787
3788 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3789 {
3790 /* ??? Specification exception: r1 must be even. */
3791 int r1 = get_field(f, r1);
3792 o->in1 = tcg_temp_new_i64();
3793 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3794 }
3795
/* IN1 = a fresh copy of register r2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* IN1 = a fresh copy of register r3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

/* IN1 = the r3 global itself; g_in1 prevents it being freed.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

/* IN1 = register r3, sign-extended from 32 bits.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

/* IN1 = register r3, zero-extended from 32 bits.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

/* IN1 = the 32-bit (short) float in register f1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

/* IN1 = the f1 float global itself; g_in1 prevents it being freed.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}

/* Compute the first-operand effective address (b1 + d1, no index).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

/* Load IN1 from the first-operand address with the given width and
   extension; addr1 is left valid for a possible wout store.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3881
3882 /* ====================================================================== */
3883 /* The "INput 2" generators. These load the second operand to an insn. */
3884
/* IN2 = a fresh copy of register r2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}

/* IN2 = the r2 global itself; g_in2 prevents it being freed.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

/* IN2 = register r2, but left unset when r2 == 0 (register 0 means
   "no operand" in this context).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}

/* IN2 = register r2, sign-extended from 8 bits.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = register r2, zero-extended from 8 bits.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = register r2, sign-extended from 16 bits.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = register r2, zero-extended from 16 bits.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = a fresh copy of register r3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

/* IN2 = register r2, sign-extended from 32 bits.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = register r2, zero-extended from 32 bits.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = the 32-bit (short) float in register f2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}

/* IN2 = the f2 float global itself; g_in2 prevents it being freed.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Load the 128-bit (extended) float pair f2, f2+2 into IN1/IN2 as
   globals; neither is freed afterward.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f2 = get_field(f, r2);
    o->in1 = fregs[f2];
    o->in2 = fregs[(f2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}

/* IN2 = the second-operand effective address (x2 + b2 + d2).  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* IN2 = pc-relative address: current pc + i2 halfwords.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

/* IN2 = shift count for 32- or 64-bit shifts (mask 31 resp. 63).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}

/* Load IN2 from the second-operand address with the given width and
   extension.  The address temp in IN2 is reused for the value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* As above, but from a pc-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = the i2 immediate (sign already applied by field extraction).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

/* IN2 = the i2 immediate, truncated/zero-extended to the given width.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

/* IN2 = the unsigned i2 immediate shifted left by the insn data field
   (used for the "insert immediate" style insns).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
4070
4071 /* ====================================================================== */
4072
4073 /* Find opc within the table of insns. This is formulated as a switch
4074 statement so that (1) we get compile-time notice of cut-paste errors
4075 for duplicated opcodes, and (2) the compiler generates the binary
4076 search tree, rather than us having to post-process the table. */
4077
/* C() is D() with a zero data field.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: one enumerator per insn, used to
   index insn_info[].  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
/* Second expansion: build the DisasInsn descriptor for each insn.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef D
/* Third expansion: one switch case per opcode, yielding the compile-time
   binary search described in the comment above.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4129
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
4133
/* Extract one operand field described by F from the (left-aligned)
   insn word, apply sign-extension or un-splitting as required by the
   field type, and record the value in O.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized entry marks an unused slot in the format table.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Standard sign-extension trick: xor with the sign bit, then
           subtract it back out.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The high 8 bits (dh) were extracted after the low 12 (dl);
           reassemble as dh:dl with sign-extension of dh.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4169
4170 /* Lookup the insn at the current PC, extracting the operands into O and
4171 returning the info struct for the insn. Returns NULL for invalid insn. */
4172
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for an invalid
   or unimplemented insn.  Also sets s->next_pc from the insn length.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes always contain the major opcode, from which
       the total instruction length (2, 4 or 6 bytes) is derivable.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the 64-bit word, as extract_field
       expects.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* SS formats have no secondary opcode.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4258
/* Translate the single insn at s->pc.  Uses the table-driven decoder
   when the insn is known to it, otherwise falls back to the legacy
   interpreter.  Advances s->pc and returns the exit status.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter.  This includes ILLOPC. */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy DISAS_* status onto the new ExitStatus.  */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction: load inputs, prep output, perform the
       operation, write the output, then update the cc.  Each stage is
       optional per the insn's table entry.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       TCG globals, which must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4343
/* Translate a basic block starting at tb->pc into TCG ops.  When
   SEARCH_PC is set, also record per-op pc/cc metadata so that guest
   state can be reconstructed for a faulting host pc.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record the guest pc, cc op and icount for this op index,
               zero-filling any op slots skipped since the last insn.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this pc ends the TB before translating the
           insn, so that EXCP_DEBUG is raised with correct state.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The TB epilogue was already emitted by the insn itself.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the metadata for any trailing op slots.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4482
/* Public entry point: translate a TB without pc-search metadata.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
4487
/* Public entry point: translate a TB recording pc-search metadata,
   used when restoring state after a fault inside the TB.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4492
/* Restore guest psw.addr (and cc_op, when it was a concrete value) from
   the metadata recorded at op index PC_POS by the search-pc pass.  */
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
    cc_op = gen_opc_cc_op[pc_pos];
    /* DYNAMIC/STATIC are placeholders, not real cc values; keep the
       current env->cc_op in those cases.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}