]> git.proxmox.com Git - qemu.git/blob - target-s390x/translate.c
265fc26800a78b939926853d7694c1129b47214e
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
51 struct DisasContext {
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
54 DisasFields *fields;
55 uint64_t pc, next_pc;
56 enum cc_op cc_op;
57 bool singlestep_enabled;
58 int is_jmp;
59 };
60
61 /* Information carried about a condition to be evaluated. */
62 typedef struct {
63 TCGCond cond:8;
64 bool is_64;
65 bool g1;
66 bool g2;
67 union {
68 struct { TCGv_i64 a, b; } s64;
69 struct { TCGv_i32 a, b; } s32;
70 } u;
71 } DisasCompare;
72
73 #define DISAS_EXCP 4
74
75 static void gen_op_calc_cc(DisasContext *s);
76
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
80 #endif
81
/* Log the raw instruction word when verbose disas debugging is enabled;
   a no-op otherwise (LOG_DISAS compiles away).  */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
86
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
99 {
100 int i;
101
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
108 }
109
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
116 }
117 }
118
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
125 }
126 }
127
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
135 }
136 }
137 #endif
138
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
143 }
144 #endif
145
146 cpu_fprintf(f, "\n");
147 }
148
149 static TCGv_i64 psw_addr;
150 static TCGv_i64 psw_mask;
151
152 static TCGv_i32 cc_op;
153 static TCGv_i64 cc_src;
154 static TCGv_i64 cc_dst;
155 static TCGv_i64 cc_vr;
156
157 static char cpu_reg_names[32][4];
158 static TCGv_i64 regs[16];
159 static TCGv_i64 fregs[16];
160
161 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
/* One-time translator initialization: create the TCG globals that
   alias the CPUS390XState fields used during translation — the PSW,
   the deferred-CC inputs (cc_src/cc_dst/cc_vr and the cc_op selector),
   and the 16 general and 16 floating point registers.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* Register names must outlive the TCG context, hence the static
       cpu_reg_names array: slots 0-15 are "r0".."r15", 16-31 "f0".."f15". */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
202
/* Return a fresh i64 temporary holding a copy of general register REG.  */
static inline TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh i64 temporary holding a copy of float register REG.  */
static inline TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, fregs[reg]);
    return r;
}

/* Return the short (32-bit) float in REG — kept in the high half of
   the 64-bit register — as a fresh i32 temporary.  */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    /* On a 32-bit host an i64 global is a register pair; grab the
       high word directly.  */
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    /* On a 64-bit host i32 and i64 temps share the same index space,
       so reinterpret the i32 temp as i64 and shift the high word down.  */
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}

/* As load_freg32, but deliver the value in (the low half of) an i64.  */
static inline TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

/* Return the low 32 bits of general register REG as an i32 temporary.  */
static inline TCGv_i32 load_reg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(r, regs[reg]);
    return r;
}

/* Return the low 32 bits of general register REG, sign-extended to
   64 bits, as an i64 temporary.  */
static inline TCGv_i64 load_reg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(r, regs[reg]);
    return r;
}
248
/* Store all 64 bits of V into general register REG.  */
static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store all 64 bits of V into float register REG.  */
static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

/* Store V into the low half of general register REG.  */
static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    /* Reinterpret the i32 temp as i64 (same index space on 64-bit
       hosts) and deposit it into bits 0..31.  */
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}

/* As store_reg32, with the value already in (the low half of) an i64.  */
static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of register REG.  */
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store the low 16 bits of V into general register REG.  */
static inline void store_reg16(int reg, TCGv_i32 v)
{
    /* 16 bit register writes keep the upper bytes */
#if HOST_LONG_BITS == 32
    tcg_gen_deposit_i32(TCGV_LOW(regs[reg]), TCGV_LOW(regs[reg]), v, 0, 16);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 16);
#endif
}

/* Store the short float V into register REG (high half of the i64).  */
static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}

/* As store_freg32, with the value already in (the low half of) an i64.  */
static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the second (low) half of a 128-bit helper result, which
   helpers return via env->retxl.  */
static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
312
/* Synchronize the guest PSW address with the translator's current pc.  */
static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Call before emitting an operation that may fault: flush psw.addr and
   the condition code to env so the exception path sees correct state.
   User-mode emulation recovers state from the TB, so nothing to do there.  */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}

/* Fetch 2 bytes of instruction text at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch 4 bytes of instruction text at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch a 6-byte instruction at PC into the low 48 bits of the result.  */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
341
342 static inline int get_mem_index(DisasContext *s)
343 {
344 switch (s->tb->flags & FLAG_MASK_ASC) {
345 case PSW_ASC_PRIMARY >> 32:
346 return 0;
347 case PSW_ASC_SECONDARY >> 32:
348 return 1;
349 case PSW_ASC_HOME >> 32:
350 return 2;
351 default:
352 tcg_abort();
353 break;
354 }
355 }
356
357 static void gen_exception(int excp)
358 {
359 TCGv_i32 tmp = tcg_const_i32(excp);
360 gen_helper_exception(cpu_env, tmp);
361 tcg_temp_free_i32(tmp);
362 }
363
/* Raise program exception CODE for the current instruction: record the
   code and instruction length in env, advance the PSW past the
   instruction, materialize the CC, and end the TB with EXCP_PGM.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length, needed for PSW advancement on the PGM path.  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    gen_op_calc_cc(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);

    /* End TB here.  */
    s->is_jmp = DISAS_EXCP;
}
390
/* Raise a program exception for an unrecognized/unimplemented opcode.
   NOTE(review): z/Architecture raises an *operation* exception
   (PGM_OPERATION) for invalid opcodes; PGM_SPECIFICATION here may be a
   placeholder — confirm against the Principles of Operation before
   relying on the exception code.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

/* Raise a privileged-operation exception if translating in problem
   state (PSW P bit set, as cached in tb->flags).  */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
402
403 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
404 {
405 TCGv_i64 tmp;
406
407 /* 31-bitify the immediate part; register contents are dealt with below */
408 if (!(s->tb->flags & FLAG_MASK_64)) {
409 d2 &= 0x7fffffffUL;
410 }
411
412 if (x2) {
413 if (d2) {
414 tmp = tcg_const_i64(d2);
415 tcg_gen_add_i64(tmp, tmp, regs[x2]);
416 } else {
417 tmp = load_reg(x2);
418 }
419 if (b2) {
420 tcg_gen_add_i64(tmp, tmp, regs[b2]);
421 }
422 } else if (b2) {
423 if (d2) {
424 tmp = tcg_const_i64(d2);
425 tcg_gen_add_i64(tmp, tmp, regs[b2]);
426 } else {
427 tmp = load_reg(b2);
428 }
429 } else {
430 tmp = tcg_const_i64(d2);
431 }
432
433 /* 31-bit mode mask if there are values loaded from registers */
434 if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
435 tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
436 }
437
438 return tmp;
439 }
440
/* Set the condition code to the constant VAL (0..3); no code emitted,
   the constant is recorded in s->cc_op.  */
static void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}

/* The gen_op_update{1,2,3}_cc_* helpers record a deferred condition
   code computation: the operands are copied into the cc_src/cc_dst/
   cc_vr globals and the operation into s->cc_op; the actual CC value
   is only materialized later by gen_op_calc_cc.  Unused inputs are
   discarded so TCG can dead-code-eliminate their producers.  */

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* Record "CC from zero/non-zero of VAL" for a 32-bit value.  */
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}

/* Record "CC from zero/non-zero of VAL" for a 64-bit value.  */
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
498
/* Record a deferred 32-bit comparison of V1 against V2 under CC op COND.  */
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}

/* Record a deferred 64-bit comparison of V1 against V2 under CC op COND.  */
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}

/* Signed 32-bit compare.  */
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}

/* Unsigned 32-bit compare.  */
static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}

/* Signed 32-bit compare against an immediate.  */
static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
{
    /* XXX optimize for the constant? put it in s? */
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
    tcg_temp_free_i32(tmp);
}

/* Unsigned 32-bit compare against an immediate.  */
static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
{
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
    tcg_temp_free_i32(tmp);
}

/* Signed 64-bit compare.  */
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}

/* Unsigned 64-bit compare.  */
static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}

/* Signed 64-bit compare against an immediate.  */
static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_s64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* Unsigned 64-bit compare against an immediate.  */
static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_u64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* Record "CC from sign of VAL" (compare against zero), 32-bit.  */
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}

/* Record "CC from sign of VAL" (compare against zero), 64-bit.  */
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}
569
/* Record a deferred short-float comparison: V1 is the 32-bit float
   operand, V2 holds the memory operand in an i64 temp.  */
static void set_cc_cmp_f32_i64(DisasContext *s, TCGv_i32 v1, TCGv_i64 v2)
{
    tcg_gen_extu_i32_i64(cc_src, v1);
    tcg_gen_mov_i64(cc_dst, v2);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_LTGT_F32;
}

/* Record "CC from short-float result V1" (zero/nonzero/NaN class).  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i32 v1)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ_F32, v1);
}

/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}

/* Flush the translator's notion of cc_op into the cc_op global, unless
   the runtime value is already authoritative (DYNAMIC/STATIC).  */
static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Synonym for gen_op_set_cc_op, kept for call-site readability.  */
static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
603
/* calculates cc into cc_op: materialize the deferred condition code
   recorded in s->cc_op (with operands in cc_src/cc_dst/cc_vr) into the
   cc_op global, then mark the CC as statically known.  The helper is
   called with only as many live operands as the operation needs; the
   rest are passed as a dummy zero.  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_LTGT_F32:
    case CC_OP_LTGT_F64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
678
/* Decode an RR-format instruction: two 4-bit register fields.  */
static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}

/* Decode an RX-format instruction (R1, X2, B2, D2) and return the
   computed effective address as a fresh temporary.  */
static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;

    return get_address(s, *x2, *b2, *d2);
}

/* Decode an RS-format instruction (R1, R3/M3, B2, D2); no address is
   formed here since some RS ops treat D2(B2) specially.  */
static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}

/* Decode an SI-format instruction (I2 immediate, B1, D1) and return the
   computed effective address as a fresh temporary.  */
static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}
723
/* True if a direct TB-to-TB jump to DEST is permissible: the target
   must lie in one of the (up to two) pages this TB occupies, and
   neither single-stepping nor an I/O-ending TB may be in effect.  */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}

/* End the TB with a jump to PC, chaining directly to the successor TB
   (slot TB_NUM) when use_goto_tb allows it.  */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}

/* Statistics for DEBUG_INLINE_BRANCHES: count branches that could not
   be folded into an inline TCG comparison...  */
static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* ...and those that could.  */
static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
761
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.
   Indexed by the 4-bit instruction mask: the comments show which CCs
   each mask bit accepts (EQ=CC0, LT=CC1, GT=CC2, x=CC3).  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
    TCG_COND_GT, TCG_COND_NEVER, /* | | GT | x */
    TCG_COND_LT, TCG_COND_NEVER, /* | LT | | x */
    TCG_COND_NE, TCG_COND_NEVER, /* | LT | GT | x */
    TCG_COND_EQ, TCG_COND_NEVER, /* EQ | | | x */
    TCG_COND_GE, TCG_COND_NEVER, /* EQ | | GT | x */
    TCG_COND_LE, TCG_COND_NEVER, /* EQ | LT | | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible, so the low two mask
   bits are don't-cares.  */
static const TCGCond nz_cond[16] = {
    /* | | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /* | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ | | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
788
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the pending CC
   operation can be tested inline, the comparison is built directly on
   the CC operands; otherwise the CC is materialized first and the mask
   is tested against the 0..3 CC value.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Unconditional masks need no operands at all; point the i32 slots
       at the cc_op global (marked global so they are not freed).  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: only the all-zero (CC0) and mixed (CC1)
           outcomes map to a simple inline test of src & dst.  */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The CC value is live in the cc_op global (0..3); pick the
           cheapest test for each remaining mask.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
996
997 static void free_compare(DisasCompare *c)
998 {
999 if (!c->g1) {
1000 if (c->is_64) {
1001 tcg_temp_free_i64(c->u.s64.a);
1002 } else {
1003 tcg_temp_free_i32(c->u.s32.a);
1004 }
1005 }
1006 if (!c->g2) {
1007 if (c->is_64) {
1008 tcg_temp_free_i64(c->u.s64.b);
1009 } else {
1010 tcg_temp_free_i32(c->u.s32.b);
1011 }
1012 }
1013 }
1014
/* Translate the not-yet-converted 0xE3 (RXY-format) opcodes: byte-swap
   loads/stores and LLGT.  OP is the low opcode byte; the effective
   address is D2(X2,B2).  */
static void disas_e3(CPUS390XState *env, DisasContext* s, int op, int r1,
                     int x2, int b2, int d2)
{
    TCGv_i64 addr, tmp2;
    TCGv_i32 tmp32_1;

    LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
              op, r1, x2, b2, d2);
    addr = get_address(s, x2, b2, d2);
    switch (op) {
    case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
        /* Load 64 bits, byte-reversed.  */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
        tcg_gen_bswap64_i64(tmp2, tmp2);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
        /* Load 32 bits and clear bit 32 (31-bit address load).  */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_andi_i64(tmp2, tmp2, 0x7fffffffULL);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
        /* Load 32 bits byte-reversed into the low register half.  */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
        /* Load 16 bits byte-reversed into the low register quarter.  */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap16_i32(tmp32_1, tmp32_1);
        store_reg16(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
        /* Store the low 32 bits of R1 byte-reversed.  */
        tmp32_1 = load_reg32(r1);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        break;
    default:
        LOG_DISAS("illegal e3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
    tcg_temp_free_i64(addr);
}
1075
#ifndef CONFIG_USER_ONLY
/* Translate the 0xE5 (SSE-format) opcodes — currently only TPROT.
   Both operands are D(B) addresses; system emulation only.  */
static void disas_e5(CPUS390XState *env, DisasContext* s, uint64_t insn)
{
    TCGv_i64 tmp, tmp2;
    int op = (insn >> 32) & 0xff;

    /* First operand address D1(B1), second D2(B2).  */
    tmp = get_address(s, 0, (insn >> 28) & 0xf, (insn >> 16) & 0xfff);
    tmp2 = get_address(s, 0, (insn >> 12) & 0xf, insn & 0xfff);

    LOG_DISAS("disas_e5: insn %" PRIx64 "\n", insn);
    switch (op) {
    case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
        /* Test Protection */
        potential_page_fault(s);
        gen_helper_tprot(cc_op, tmp, tmp2);
        set_cc_static(s);
        break;
    default:
        LOG_DISAS("illegal e5 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(tmp2);
}
#endif
1103
/* Translate the not-yet-converted 0xEB (RSY-format) opcodes: control
   register load/store, STCMH and the compare-and-swap variants, all
   implemented via helpers.  */
static void disas_eb(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int r3, int b2, int d2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2;

    LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
              op, r1, r3, b2, d2);
    switch (op) {
    case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
        /* Store characters under mask, high half; r3 is the mask.  */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stcmh(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
#ifndef CONFIG_USER_ONLY
    case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
        /* Load Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_lctlg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
        /* Store Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stctg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
#endif
    case 0x30: /* CSG R1,R3,D2(B2) [RSY] */
        /* 64-bit compare and swap; CC comes from the helper.  */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        /* XXX rewrite in tcg */
        gen_helper_csg(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */
        /* 128-bit compare double and swap.  */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        /* XXX rewrite in tcg */
        gen_helper_cdsg(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    default:
        LOG_DISAS("illegal eb operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
1179
1180 static void disas_ed(CPUS390XState *env, DisasContext *s, int op, int r1,
1181 int x2, int b2, int d2, int r1b)
1182 {
1183 TCGv_i32 tmp_r1, tmp32;
1184 TCGv_i64 addr, tmp;
1185 addr = get_address(s, x2, b2, d2);
1186 tmp_r1 = tcg_const_i32(r1);
1187 switch (op) {
1188 case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
1189 potential_page_fault(s);
1190 gen_helper_ldeb(cpu_env, tmp_r1, addr);
1191 break;
1192 case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
1193 potential_page_fault(s);
1194 gen_helper_lxdb(cpu_env, tmp_r1, addr);
1195 break;
1196 case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
1197 tmp = tcg_temp_new_i64();
1198 tmp32 = load_freg32(r1);
1199 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1200 set_cc_cmp_f32_i64(s, tmp32, tmp);
1201 tcg_temp_free_i64(tmp);
1202 tcg_temp_free_i32(tmp32);
1203 break;
1204 case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
1205 tmp = tcg_temp_new_i64();
1206 tmp32 = tcg_temp_new_i32();
1207 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1208 tcg_gen_trunc_i64_i32(tmp32, tmp);
1209 gen_helper_aeb(cpu_env, tmp_r1, tmp32);
1210 tcg_temp_free_i64(tmp);
1211 tcg_temp_free_i32(tmp32);
1212
1213 tmp32 = load_freg32(r1);
1214 gen_set_cc_nz_f32(s, tmp32);
1215 tcg_temp_free_i32(tmp32);
1216 break;
1217 case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
1218 tmp = tcg_temp_new_i64();
1219 tmp32 = tcg_temp_new_i32();
1220 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1221 tcg_gen_trunc_i64_i32(tmp32, tmp);
1222 gen_helper_seb(cpu_env, tmp_r1, tmp32);
1223 tcg_temp_free_i64(tmp);
1224 tcg_temp_free_i32(tmp32);
1225
1226 tmp32 = load_freg32(r1);
1227 gen_set_cc_nz_f32(s, tmp32);
1228 tcg_temp_free_i32(tmp32);
1229 break;
1230 case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
1231 tmp = tcg_temp_new_i64();
1232 tmp32 = tcg_temp_new_i32();
1233 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1234 tcg_gen_trunc_i64_i32(tmp32, tmp);
1235 gen_helper_deb(cpu_env, tmp_r1, tmp32);
1236 tcg_temp_free_i64(tmp);
1237 tcg_temp_free_i32(tmp32);
1238 break;
1239 case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
1240 potential_page_fault(s);
1241 gen_helper_tceb(cc_op, cpu_env, tmp_r1, addr);
1242 set_cc_static(s);
1243 break;
1244 case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
1245 potential_page_fault(s);
1246 gen_helper_tcdb(cc_op, cpu_env, tmp_r1, addr);
1247 set_cc_static(s);
1248 break;
1249 case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
1250 potential_page_fault(s);
1251 gen_helper_tcxb(cc_op, cpu_env, tmp_r1, addr);
1252 set_cc_static(s);
1253 break;
1254 case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
1255 tmp = tcg_temp_new_i64();
1256 tmp32 = tcg_temp_new_i32();
1257 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1258 tcg_gen_trunc_i64_i32(tmp32, tmp);
1259 gen_helper_meeb(cpu_env, tmp_r1, tmp32);
1260 tcg_temp_free_i64(tmp);
1261 tcg_temp_free_i32(tmp32);
1262 break;
1263 case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
1264 potential_page_fault(s);
1265 gen_helper_cdb(cc_op, cpu_env, tmp_r1, addr);
1266 set_cc_static(s);
1267 break;
1268 case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
1269 potential_page_fault(s);
1270 gen_helper_adb(cc_op, cpu_env, tmp_r1, addr);
1271 set_cc_static(s);
1272 break;
1273 case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
1274 potential_page_fault(s);
1275 gen_helper_sdb(cc_op, cpu_env, tmp_r1, addr);
1276 set_cc_static(s);
1277 break;
1278 case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
1279 potential_page_fault(s);
1280 gen_helper_mdb(cpu_env, tmp_r1, addr);
1281 break;
1282 case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
1283 potential_page_fault(s);
1284 gen_helper_ddb(cpu_env, tmp_r1, addr);
1285 break;
1286 case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
1287 /* for RXF insns, r1 is R3 and r1b is R1 */
1288 tmp32 = tcg_const_i32(r1b);
1289 potential_page_fault(s);
1290 gen_helper_madb(cpu_env, tmp32, addr, tmp_r1);
1291 tcg_temp_free_i32(tmp32);
1292 break;
1293 default:
1294 LOG_DISAS("illegal ed operation 0x%x\n", op);
1295 gen_illegal_opcode(s);
1296 return;
1297 }
1298 tcg_temp_free_i32(tmp_r1);
1299 tcg_temp_free_i64(addr);
1300 }
1301
/* Disassemble one 0xB2-prefixed instruction.
 *
 * op   - minor opcode (bits 16..23 of the 4-byte instruction)
 * insn - the full 32-bit instruction word; RRE register fields are decoded
 *        below, S-format cases re-decode base/displacement via decode_rs().
 *
 * Most of this opcode space is privileged (control, clock, storage-key and
 * PSW manipulation) and therefore compiled out for CONFIG_USER_ONLY.
 * Helpers that compute a condition code take cc_op as their first argument
 * and are followed by set_cc_static().
 */
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
                     uint32_t insn)
{
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    int r1, r2;
#ifndef CONFIG_USER_ONLY
    int r3, d2, b2;
#endif

    /* RRE-style decode; S-format cases below override via decode_rs().  */
    r1 = (insn >> 4) & 0xf;
    r2 = insn & 0xf;

    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);

    switch (op) {
    case 0x22: /* IPM R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        /* The helper reads the materialized CC, so force it into cc_op.  */
        gen_op_calc_cc(s);
        gen_helper_ipm(cpu_env, cc_op, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x41: /* CKSM R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        gen_op_movi_cc(s, 0);
        break;
    case 0x4e: /* SAR R1,R2 [RRE] */
        /* Copy general register r2 into access register r1.  */
        tmp32_1 = load_reg32(r2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x4f: /* EAR R1,R2 [RRE] */
        /* Copy access register r2 into general register r1.  */
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x54: /* MVPG R1,R2 [RRE] */
        tmp = load_reg(0);
        tmp2 = load_reg(r1);
        tmp3 = load_reg(r2);
        potential_page_fault(s);
        gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        /* XXX check CCO bit and set CC accordingly */
        gen_op_movi_cc(s, 0);
        break;
    case 0x55: /* MVST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        gen_op_movi_cc(s, 1);
        break;
    case 0x5d: /* CLST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x5e: /* SRST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;

#ifndef CONFIG_USER_ONLY
    case 0x02: /* STIDP D2(B2) [S] */
        /* Store CPU ID */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stidp(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x04: /* SCK D2(B2) [S] */
        /* Set Clock */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sck(cc_op, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x05: /* STCK D2(B2) [S] */
        /* Store Clock - note: not privileged */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stck(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x06: /* SCKC D2(B2) [S] */
        /* Set Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x07: /* STCKC D2(B2) [S] */
        /* Store Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x08: /* SPT D2(B2) [S] */
        /* Set CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x09: /* STPT D2(B2) [S] */
        /* Store CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stpt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0a: /* SPKA D2(B2) [S] */
        /* Set PSW Key from Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        /* Replace the PSW key field with bits 56..59 of the address.  */
        tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
        tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
        tcg_gen_or_i64(psw_mask, tmp2, tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0d: /* PTLB [S] */
        /* Purge TLB */
        check_privileged(s);
        gen_helper_ptlb(cpu_env);
        break;
    case 0x10: /* SPX D2(B2) [S] */
        /* Set Prefix Register */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spx(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x11: /* STPX D2(B2) [S] */
        /* Store Prefix */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x12: /* STAP D2(B2) [S] */
        /* Store CPU Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x21: /* IPTE R1,R2 [RRE] */
        /* Invalidate PTE */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r1);
        tmp2 = load_reg(r2);
        gen_helper_ipte(cpu_env, tmp, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x29: /* ISKE R1,R2 [RRE] */
        /* Insert Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r2);
        tmp2 = tcg_temp_new_i64();
        gen_helper_iske(tmp2, cpu_env, tmp);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Reset Reference Bit Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x2b: /* SSKE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_sske(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x34: /* STCH ? */
        /* Store Subchannel - not implemented; CC 3 = not operational */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call - not implemented; CC 3 */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x78: /* STCKE D2(B2) [S] */
        /* Store Clock Extended */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x79: /* SACF D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->pc = s->next_pc;
        update_psw_addr(s);
        s->is_jmp = DISAS_JUMP;
        break;
    case 0x7d: /* STSI D2,(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        /* r0 carries the function code, r1 selects the SYSIB.  */
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x9d: /* LFPC D2(B2) [S] */
        /* Load FP Control from storage.  */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0xb1: /* STFL D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0xb2: /* LPSWE D2(B2) [S] */
        /* Load PSW Extended */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        break;
    case 0x20: /* SERVC R1,R2 [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        tmp = load_reg(r1);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
#endif
    default:
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
1677
/* Disassemble one 0xB3-prefixed instruction (binary floating point,
 * register-register forms).
 *
 * op - minor opcode; m3/r1/r2 are the decoded register/mask fields.
 * Note the decode quirk: the caller passes the third nibble as "m3",
 * which for RRF insns is really R1 (see the 0xe/0x1e/0x1f cases).
 *
 * FP_HELPER expands to a plain two-register helper call;
 * FP_HELPER_CC additionally routes the helper's result into cc_op.
 */
static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
                     int r1, int r2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
#define FP_HELPER(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

#define FP_HELPER_CC(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
    set_cc_static(s); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

    switch (op) {
    case 0x0: /* LPEBR R1,R2 [RRE] */
        FP_HELPER_CC(lpebr);
        break;
    case 0x2: /* LTEBR R1,R2 [RRE] */
        FP_HELPER_CC(ltebr);
        break;
    case 0x3: /* LCEBR R1,R2 [RRE] */
        FP_HELPER_CC(lcebr);
        break;
    case 0x4: /* LDEBR R1,R2 [RRE] */
        FP_HELPER(ldebr);
        break;
    case 0x5: /* LXDBR R1,R2 [RRE] */
        FP_HELPER(lxdbr);
        break;
    case 0x9: /* CEBR R1,R2 [RRE] */
        FP_HELPER_CC(cebr);
        break;
    case 0xa: /* AEBR R1,R2 [RRE] */
        FP_HELPER_CC(aebr);
        break;
    case 0xb: /* SEBR R1,R2 [RRE] */
        FP_HELPER_CC(sebr);
        break;
    case 0xd: /* DEBR R1,R2 [RRE] */
        FP_HELPER(debr);
        break;
    case 0x10: /* LPDBR R1,R2 [RRE] */
        FP_HELPER_CC(lpdbr);
        break;
    case 0x12: /* LTDBR R1,R2 [RRE] */
        FP_HELPER_CC(ltdbr);
        break;
    case 0x13: /* LCDBR R1,R2 [RRE] */
        FP_HELPER_CC(lcdbr);
        break;
    case 0x15: /* SQDBR R1,R2 [RRE] */
        FP_HELPER(sqdbr);
        break;
    case 0x17: /* MEEBR R1,R2 [RRE] */
        FP_HELPER(meebr);
        break;
    case 0x19: /* CDBR R1,R2 [RRE] */
        FP_HELPER_CC(cdbr);
        break;
    case 0x1a: /* ADBR R1,R2 [RRE] */
        FP_HELPER_CC(adbr);
        break;
    case 0x1b: /* SDBR R1,R2 [RRE] */
        FP_HELPER_CC(sdbr);
        break;
    case 0x1c: /* MDBR R1,R2 [RRE] */
        FP_HELPER(mdbr);
        break;
    case 0x1d: /* DDBR R1,R2 [RRE] */
        FP_HELPER(ddbr);
        break;
    case 0xe: /* MAEBR R1,R3,R2 [RRF] */
    case 0x1e: /* MADBR R1,R3,R2 [RRF] */
    case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
        /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
        tmp32_1 = tcg_const_i32(m3);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(r1);
        switch (op) {
        case 0xe:
            gen_helper_maebr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        case 0x1e:
            gen_helper_madbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        case 0x1f:
            gen_helper_msdbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x40: /* LPXBR R1,R2 [RRE] */
        FP_HELPER_CC(lpxbr);
        break;
    case 0x42: /* LTXBR R1,R2 [RRE] */
        FP_HELPER_CC(ltxbr);
        break;
    case 0x43: /* LCXBR R1,R2 [RRE] */
        FP_HELPER_CC(lcxbr);
        break;
    case 0x44: /* LEDBR R1,R2 [RRE] */
        FP_HELPER(ledbr);
        break;
    case 0x45: /* LDXBR R1,R2 [RRE] */
        FP_HELPER(ldxbr);
        break;
    case 0x46: /* LEXBR R1,R2 [RRE] */
        FP_HELPER(lexbr);
        break;
    case 0x49: /* CXBR R1,R2 [RRE] */
        FP_HELPER_CC(cxbr);
        break;
    case 0x4a: /* AXBR R1,R2 [RRE] */
        FP_HELPER_CC(axbr);
        break;
    case 0x4b: /* SXBR R1,R2 [RRE] */
        FP_HELPER_CC(sxbr);
        break;
    case 0x4c: /* MXBR R1,R2 [RRE] */
        FP_HELPER(mxbr);
        break;
    case 0x4d: /* DXBR R1,R2 [RRE] */
        FP_HELPER(dxbr);
        break;
    case 0x65: /* LXR R1,R2 [RRE] */
        /* 128-bit register copy: move both halves of the FP pair.  */
        tmp = load_freg(r2);
        store_freg(r1, tmp);
        tcg_temp_free_i64(tmp);
        tmp = load_freg(r2 + 2);
        store_freg(r1 + 2, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x74: /* LZER R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzer(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x75: /* LZDR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzdr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x76: /* LZXR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzxr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x84: /* SFPC R1 [RRE] */
        /* Set FP Control from general register r1.  */
        tmp32_1 = load_reg32(r1);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x94: /* CEFBR R1,R2 [RRE] */
    case 0x95: /* CDFBR R1,R2 [RRE] */
    case 0x96: /* CXFBR R1,R2 [RRE] */
        /* Convert from 32-bit fixed to short/long/extended BFP.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = load_reg32(r2);
        switch (op) {
        case 0x94:
            gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
            break;
        case 0x95:
            gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
            break;
        case 0x96:
            gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x98: /* CFEBR R1,R2 [RRE] */
    case 0x99: /* CFDBR R1,R2 [RRE] */
    case 0x9a: /* CFXBR R1,R2 [RRE] */
        /* Convert to 32-bit fixed; m3 is the rounding mode.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        switch (op) {
        case 0x98:
            gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        case 0x99:
            gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        case 0x9a:
            gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        default:
            tcg_abort();
        }
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xa4: /* CEGBR R1,R2 [RRE] */
    case 0xa5: /* CDGBR R1,R2 [RRE] */
        /* Convert from 64-bit fixed to short/long BFP.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp = load_reg(r2);
        switch (op) {
        case 0xa4:
            gen_helper_cegbr(cpu_env, tmp32_1, tmp);
            break;
        case 0xa5:
            gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0xa6: /* CXGBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp = load_reg(r2);
        gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0xa8: /* CGEBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xa9: /* CGDBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xaa: /* CGXBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    default:
        LOG_DISAS("illegal b3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }

#undef FP_HELPER_CC
#undef FP_HELPER
}
1950
/* Disassemble one 0xB9-prefixed instruction (RRE extensions).
 *
 * op - minor opcode; r1/r2 the decoded register fields.
 * Only a handful of opcodes remain here; the rest have been moved to
 * the table-driven decoder.
 */
static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int r2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1;

    LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
    switch (op) {
    case 0x17: /* LLGTR R1,R2 [RRE] */
        /* Load 31-bit value: clear bit 32 then zero-extend to 64 bits.  */
        tmp32_1 = load_reg32(r2);
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i32(tmp32_1, tmp32_1, 0x7fffffffUL);
        tcg_gen_extu_i32_i64(tmp, tmp32_1);
        store_reg(r1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0f: /* LRVGR R1,R2 [RRE] */
        /* Load Reversed: 64-bit byte swap directly on the globals.  */
        tcg_gen_bswap64_i64(regs[r1], regs[r2]);
        break;
    case 0x1f: /* LRVR R1,R2 [RRE] */
        tmp32_1 = load_reg32(r2);
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x83: /* FLOGR R1,R2 [RRE] */
        /* Find Leftmost One */
        tmp = load_reg(r2);
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        break;
    default:
        LOG_DISAS("illegal b9 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
1991
/* Top-level dispatch for the opcodes still handled by the legacy
 * (non-table-driven) decoder.  Reads the major opcode byte at s->pc,
 * loads the full 4- or 6-byte instruction and forwards to the
 * per-prefix disassembly routine, or emits an illegal-opcode trap.
 */
static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2;
    unsigned char opc;
    uint64_t insn;
    int op, r1, r2, r3, d2, x2, b2, r1b;

    opc = cpu_ldub_code(env, s->pc);
    LOG_DISAS("opc 0x%x\n", opc);

    switch (opc) {
    case 0xb2:
        insn = ld_code4(env, s->pc);
        op = (insn >> 16) & 0xff;
        disas_b2(env, s, op, insn);
        break;
    case 0xb3:
        insn = ld_code4(env, s->pc);
        op = (insn >> 16) & 0xff;
        r3 = (insn >> 12) & 0xf; /* aka m3 */
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        disas_b3(env, s, op, r3, r1, r2);
        break;
    case 0xb9:
        insn = ld_code4(env, s->pc);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        op = (insn >> 16) & 0xff;
        disas_b9(env, s, op, r1, r2);
        break;
    case 0xba: /* CS R1,R3,D2(B2) [RS] */
        insn = ld_code4(env, s->pc);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_cs(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
        insn = ld_code4(env, s->pc);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_clm(cc_op, cpu_env, tmp32_1, tmp32_2, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
        insn = ld_code4(env, s->pc);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stcm(cpu_env, tmp32_1, tmp32_2, tmp);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0xe3:
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        op = insn & 0xff;
        r1 = (insn >> 36) & 0xf;
        x2 = (insn >> 32) & 0xf;
        b2 = (insn >> 28) & 0xf;
        /* Assemble the split 20-bit displacement (DL at bits 16..27,
           DH at bits 8..15) and sign-extend via the shift pair.  */
        d2 = ((int)((((insn >> 16) & 0xfff)
           | ((insn << 4) & 0xff000)) << 12)) >> 12;
        disas_e3(env, s, op, r1, x2, b2, d2 );
        break;
#ifndef CONFIG_USER_ONLY
    case 0xe5:
        /* Test Protection */
        check_privileged(s);
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        disas_e5(env, s, insn);
        break;
#endif
    case 0xeb:
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        op = insn & 0xff;
        r1 = (insn >> 36) & 0xf;
        r3 = (insn >> 32) & 0xf;
        b2 = (insn >> 28) & 0xf;
        /* Same split 20-bit signed displacement as the 0xe3 case.  */
        d2 = ((int)((((insn >> 16) & 0xfff)
           | ((insn << 4) & 0xff000)) << 12)) >> 12;
        disas_eb(env, s, op, r1, r3, b2, d2);
        break;
    case 0xed:
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        op = insn & 0xff;
        r1 = (insn >> 36) & 0xf;
        x2 = (insn >> 32) & 0xf;
        b2 = (insn >> 28) & 0xf;
        /* RXE/RXF displacement is only 12 bits, unsigned.  */
        d2 = (short)((insn >> 16) & 0xfff);
        r1b = (insn >> 12) & 0xf;
        disas_ed(env, s, op, r1, x2, b2, d2, r1b);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
        gen_illegal_opcode(s);
        break;
    }
}
2110
2111 /* ====================================================================== */
2112 /* Define the insn format enumeration. */
/* Each FN macro maps an entry of insn-format.def to an enumerator name;
   the extra arguments (the field descriptors) are discarded here.  */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

/* One enumerator per instruction format (FMT_RRE, FMT_RXY, ...).  */
typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
2130
2131 /* Define a structure to hold the decoded fields. We'll store each inside
2132 an array indexed by an enum. In order to conserve memory, we'll arrange
2133 for fields that do not exist at the same time to overlap, thus the "C"
2134 for compact. For checking purposes there is an "O" for original index
2135 as well that will be applied to availability bitmaps. */
2136
/* "Original" field indices: one per architecturally-named operand field.
   Used as bit positions in DisasFields.presentO, so the decoder can test
   which fields a given instruction actually carries.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
2159
/* "Compact" field indices: storage slots in DisasFields.c[].  Fields that
   can never appear in the same instruction format share a slot, keeping
   the array at NUM_C_FIELD entries.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
2190
struct DisasFields {
    unsigned op:8;           /* major opcode byte */
    unsigned op2:8;          /* extended/minor opcode byte */
    unsigned presentC:16;    /* bitmap over compact indices in use */
    unsigned int presentO;   /* bitmap over original field indices */
    int c[NUM_C_FIELD];      /* decoded field values, compact-indexed */
};

/* This is the way fields are to be accessed out of DisasFields.  The F
   token is a bare field name such as r1 or d2; the macros expand it into
   the matching original/compact index pair.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
2202
2203 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
2204 {
2205 return (f->presentO >> c) & 1;
2206 }
2207
2208 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
2209 enum DisasFieldIndexC c)
2210 {
2211 assert(have_field1(f, o));
2212 return f->c[c];
2213 }
2214
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field in the insn */
    unsigned int size:8;        /* width of the field in bits */
    unsigned int type:2;        /* 0=unsigned, 1=signed, 2=long-disp */
    unsigned int indexC:6;      /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8;  /* original field index (FLD_O_*) */
} DisasField;

/* All field descriptors for one instruction format.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
2227
/* Field-descriptor builders: R = register, M = mask, BD = base+12-bit
   displacement, BXD = base+index+12-bit displacement, BDL/BXDL = the
   long-displacement (20-bit) variants, I = signed immediate,
   L = length field.  N is the operand number, B the starting bit.  */
#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

/* The FN macros now expand each insn-format.def entry into an initializer
   for that format's DisasFormatInfo; indexed by the DisasFormat enum.  */
#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
2268
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    /* The g_* flags mark temporaries that alias TCG globals (real CPU
       registers) and therefore must not be freed by the common code.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;   /* operand/result temporaries */
    TCGv_i64 addr1;                 /* first-operand address, if any */
} DisasOps;
2277
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
2295
/* Architectural facility an instruction belongs to; used to gate
   decode on the emulated machine's feature set.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
2318
/* Static description of one instruction: opcode, format, required
   facility, and the pipeline of helper hooks the central dispatcher
   runs in order: in1/in2 load operands, prep readies the output,
   op performs the operation, wout writes results back, cout updates
   the condition-code machinery.  */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:6;
    DisasFacility fac:6;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    /* Per-insn constant available to the hooks via s->insn->data.  */
    uint64_t data;
};
2335
2336 /* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */
2338
/* Load the shift count for a shift instruction into o->in2, masked to
   the operand width (mask is 31 or 63).  With no base register the
   count is the displacement itself; otherwise it is the low bits of
   the computed address.  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
2352
/* Emit an unconditional branch to a known destination.  Uses a chained
   goto_tb when the target is reachable that way; otherwise just sets
   the PSW address and exits the TB.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        /* Branching to the next insn is a no-op.  */
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        gen_update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
2369
/* Emit a conditional branch.  C describes the condition; the target is
   either relative immediate IMM (is_imm) or the register value CDEST.
   Chooses among never/always shortcuts, dual goto_tb, single goto_tb,
   and a movcond-based fallback, and always frees the compare.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;    /* IMM is in halfwords */
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so a 64-bit movcond
               can select between the two destinations.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
2494
2495 /* ====================================================================== */
2496 /* The operations. These perform the bulk of the work for any insn,
2497 usually after the operands have been loaded and output initialized. */
2498
/* LOAD POSITIVE: out = |in2|.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}
2504
/* ADD: out = in1 + in2; CC is handled by the cout hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2510
/* ADD WITH CARRY: out = in1 + in2 + carry, where the carry is bit 1
   of the current condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Carry is CC bit 1 (CC values 2 and 3 indicate carry).  */
    tcg_gen_shri_i64(cc, cc, 1);

    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
2527
/* AND: out = in1 & in2; CC is handled by the cout hook.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2533
/* AND IMMEDIATE (NIxx): AND a partial-word immediate into the field
   [shift, shift+size) of in1, leaving other bits untouched.  The
   shift and size are packed into insn->data.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Position the immediate and set all bits outside the field so
       the AND leaves them unchanged.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2550
2551 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
2552 {
2553 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
2554 if (!TCGV_IS_UNUSED_I64(o->in2)) {
2555 tcg_gen_mov_i64(psw_addr, o->in2);
2556 return EXIT_PC_UPDATED;
2557 } else {
2558 return NO_EXIT;
2559 }
2560 }
2561
/* BRANCH RELATIVE AND SAVE: save link info, branch to pc + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
2567
/* BRANCH ON CONDITION: branch when the CC matches mask m1, either to
   a relative immediate or to the address in in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
2578
/* BRANCH ON COUNT (32-bit): decrement the low word of R1 and branch
   if the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement, store back only the low 32 bits, and compare the
       32-bit result against zero.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
2602
/* BRANCH ON COUNT (64-bit): decrement R1 in place and branch if the
   result is non-zero.  The register itself is the (global) compare
   operand, hence g1 = true.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
2621
/* COMPARE LOGICAL (storage-storage): compare l1+1 bytes.  Power-of-two
   sizes are inlined as two loads and an unsigned compare; everything
   else goes through the helper, which sets the CC itself.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);   /* encoded length is length - 1 */
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
2655
/* COMPARE LOGICAL LONG EXTENDED: fully interpretive via helper, which
   computes the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2667
/* CONVERT TO DECIMAL: convert the 32-bit value in in1 to packed
   decimal via helper and store the 8-byte result at address in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2679
2680 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): dispatch on the function code in the D2
   field; the helper consumes/produces r1/r2 by convention.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
2694 #endif
2695
/* DIVIDE (32-bit signed): helper returns quotient/remainder as a
   pair; the low half is fetched via return_low128.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2702
/* DIVIDE (32-bit unsigned): as op_divs32, unsigned variant.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2709
/* DIVIDE (64-bit signed): helper returns quotient/remainder pair.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2716
/* DIVIDE (128/64 unsigned): the 128-bit dividend is passed as the
   out/out2 pair (prepared by the in1 hook -- confirm against the
   insn-data table); divisor is in2.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2723
/* EXTRACT FPC: read the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2729
/* EXECUTE: run the single instruction at the target address with its
   second byte modified by R1, entirely in the helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    /* The helper needs an accurate PSW and CC in the env.  */
    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2755
2756 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2757 {
2758 int m3 = get_field(s->fields, m3);
2759 int pos, len, base = s->insn->data;
2760 TCGv_i64 tmp = tcg_temp_new_i64();
2761 uint64_t ccm;
2762
2763 switch (m3) {
2764 case 0xf:
2765 /* Effectively a 32-bit load. */
2766 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2767 len = 32;
2768 goto one_insert;
2769
2770 case 0xc:
2771 case 0x6:
2772 case 0x3:
2773 /* Effectively a 16-bit load. */
2774 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2775 len = 16;
2776 goto one_insert;
2777
2778 case 0x8:
2779 case 0x4:
2780 case 0x2:
2781 case 0x1:
2782 /* Effectively an 8-bit load. */
2783 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2784 len = 8;
2785 goto one_insert;
2786
2787 one_insert:
2788 pos = base + ctz32(m3) * 8;
2789 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2790 ccm = ((1ull << len) - 1) << pos;
2791 break;
2792
2793 default:
2794 /* This is going to be a sequence of loads and inserts. */
2795 pos = base + 32 - 8;
2796 ccm = 0;
2797 while (m3) {
2798 if (m3 & 0x8) {
2799 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2800 tcg_gen_addi_i64(o->in2, o->in2, 1);
2801 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2802 ccm |= 0xff << pos;
2803 }
2804 m3 = (m3 << 1) & 0xf;
2805 pos -= 8;
2806 }
2807 break;
2808 }
2809
2810 tcg_gen_movi_i64(tmp, ccm);
2811 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2812 tcg_temp_free_i64(tmp);
2813 return NO_EXIT;
2814 }
2815
/* INSERT IMMEDIATE (IIxx): deposit in2 into the bit field
   [shift, shift+size) of in1; shift/size packed into insn->data.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2823
/* Load a sign-extended byte from address in2.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2829
/* Load a zero-extended byte from address in2.  */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2835
/* Load a sign-extended halfword from address in2.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2841
/* Load a zero-extended halfword from address in2.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2847
/* Load a sign-extended word from address in2.  */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2853
/* Load a zero-extended word from address in2.  */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2859
/* Load a doubleword from address in2.  */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2865
2866 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (privileged): load control registers r1..r3 from the
   storage area at in2 via helper.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2878
/* LOAD REAL ADDRESS (privileged): translate the virtual address in2;
   the helper sets the CC to report translation success.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2887
/* LOAD PSW (privileged): load the ESA-format 8-byte PSW at address
   in2 and install it.  Installing a new PSW ends execution of this
   TB, hence EXIT_NORETURN.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2906 #endif
2907
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from storage
   at in2 via helper.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2918
/* LOAD MULTIPLE (32-bit): load words into the low halves of registers
   r1 through r3 (wrapping modulo 16) from consecutive storage.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2940
/* LOAD MULTIPLE HIGH: as op_lm32, but targets the high halves of the
   registers.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2962
/* LOAD MULTIPLE (64-bit): load doublewords directly into registers
   r1 through r3 (wrapping modulo 16).  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2981
2982 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2983 {
2984 o->out = o->in2;
2985 o->g_out = o->g_in2;
2986 TCGV_UNUSED_I64(o->in2);
2987 o->g_in2 = false;
2988 return NO_EXIT;
2989 }
2990
2991 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2992 {
2993 o->out = o->in1;
2994 o->out2 = o->in2;
2995 o->g_out = o->g_in1;
2996 o->g_out2 = o->g_in2;
2997 TCGV_UNUSED_I64(o->in1);
2998 TCGV_UNUSED_I64(o->in2);
2999 o->g_in1 = o->g_in2 = false;
3000 return NO_EXIT;
3001 }
3002
/* MOVE (storage-storage): copy l1+1 bytes from in2 to addr1 via
   helper.  MVC does not change the CC.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3011
/* MOVE LONG: interpretive helper operating on register pairs r1/r2;
   the helper computes the CC.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
3023
/* MOVE LONG EXTENDED: interpretive helper; CC set by the helper.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
3035
3036 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY (privileged): cross-space move; the R1 field sits
   in the L1 slot of this SS format, hence the l1 lookup.  CC is set
   by the helper.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3046
/* MOVE TO SECONDARY (privileged): mirror image of op_mvcp.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3056 #endif
3057
/* MULTIPLY: out = in1 * in2 (low 64 bits).  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3063
/* MULTIPLY (64x64 -> 128): helper returns the high half in out; the
   low half is fetched via return_low128 into out2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3070
/* LOAD NEGATIVE: out = -|in2|.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}
3076
/* AND (storage-storage, NC): AND l1+1 bytes at in2 into addr1 via
   helper; the helper computes the CC.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3086
/* LOAD COMPLEMENT: out = -in2.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}
3092
/* OR (storage-storage, OC): OR l1+1 bytes at in2 into addr1 via
   helper; the helper computes the CC.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3102
/* OR: out = in1 | in2; CC is handled by the cout hook.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3108
/* OR IMMEDIATE (OIxx): OR a partial-word immediate into the field
   [shift, shift+size) of in1; shift/size packed into insn->data.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3124
/* ROTATE LEFT SINGLE LOGICAL (32-bit): rotate the low word of in1 by
   in2 and zero-extend the result into out.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}
3139
/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3145
3146 #ifndef CONFIG_USER_ONLY
/* SIGNAL PROCESSOR (privileged): fully handled by helper, which
   computes the CC.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3156 #endif
3157
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit index
   (31 or 63), selecting the 32- or 64-bit CC computation.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    /* Record the inputs for CC computation before they are clobbered.  */
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
3171
/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3177
/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3183
/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3189
3190 #ifndef CONFIG_USER_ONLY
/* SET SYSTEM MASK (privileged): replace PSW mask bits 0-7 with the
   byte in in2.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3197
/* STORE CONTROL (privileged): store control registers r1..r3 to the
   storage area at in2 via helper.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3209
/* STORE THEN AND/OR SYSTEM MASK (privileged).  Handles both STNSM
   (opcode 0xac, AND) and STOSM (OR), distinguished by the op byte.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into mask bits 0-7.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into mask bits 0-7.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3233 #endif
3234
/* Store the low byte of in1 at address in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3240
/* Store the low halfword of in1 at address in2.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3246
/* Store the low word of in1 at address in2.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3252
/* Store the doubleword in1 at address in2.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3258
/* STORE ACCESS MULTIPLE: store access registers r1..r3 to storage
   at in2 via helper.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3269
/* STORE MULTIPLE: store registers r1 through r3 (wrapping modulo 16)
   to consecutive storage; insn->data selects 4- or 8-byte elements.  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3293
/* STORE MULTIPLE HIGH: store the high words of registers r1..r3 to
   consecutive storage.  The 32-bit shift left places the high half
   where the truncating 32-bit store will pick it up.  */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
3317
/* SUBTRACT: out = in1 - in2; CC is handled by the cout hook.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3323
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, computed as
   in1 + ~in2 + carry (since a + ~b == a - b - 1, and the s390
   borrow is the inverse of the carry held in CC bit 1).  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Carry is CC bit 1.  */
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3341
/* SUPERVISOR CALL: record the SVC code and instruction length in the
   env, then raise the SVC exception; execution does not continue.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The exception entry needs an accurate PSW and CC.  */
    update_psw_addr(s);
    gen_op_calc_cc(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3360
3361 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3362 {
3363 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3364 potential_page_fault(s);
3365 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3366 tcg_temp_free_i32(l);
3367 set_cc_static(s);
3368 return NO_EXIT;
3369 }
3370
/* UNPACK: convert packed decimal at in2 to zoned format at addr1 via
   helper; UNPK does not change the CC.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3379
/* EXCLUSIVE OR (storage-storage, XC): XOR l1+1 bytes at in2 into
   addr1 via helper; the helper computes the CC.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3389
/* EXCLUSIVE OR: out = in1 ^ in2; CC is handled by the cout hook.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3395
/* XOR-immediate into one aligned subfield of the operand.  The field
   position is packed into insn->data: low byte = bit shift, upper bits
   = field width in bits (assumes size < 64, else the mask shift would
   be UB — TODO confirm against insn-data.def).  Requires a writable
   in2, which is shifted into position in place.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3411
3412 /* ====================================================================== */
3413 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3414 the original inputs), update the various cc data structures in order to
3415 be able to compute the new condition code. */
3416
/* CC from a 32-bit absolute-value result. */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

/* CC from a 64-bit absolute-value result. */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* CC from a 32-bit signed addition (needs both inputs and the result). */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

/* CC from a 64-bit signed addition. */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* CC from a 32-bit logical (unsigned) addition. */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

/* CC from a 64-bit logical addition. */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* CC from a 32-bit add-with-carry. */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

/* CC from a 64-bit add-with-carry. */
static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* CC from a 32-bit signed comparison of in1 vs in2. */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

/* CC from a 64-bit signed comparison. */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* CC from a 32-bit unsigned comparison. */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

/* CC from a 64-bit unsigned comparison. */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* CC from a 32-bit negative-absolute-value result. */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

/* CC from a 64-bit negative-absolute-value result. */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* CC from a 32-bit two's-complement (negate) result. */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

/* CC from a 64-bit two's-complement result. */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* CC = (low 32 bits of result != 0); uses cc_dst as scratch so the
   CC only sees the zero-extended low half.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

/* CC = (64-bit result != 0). */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* CC from the sign of a 32-bit result (compare against zero). */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

/* CC from the sign of a 64-bit result. */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* CC from a 32-bit signed subtraction. */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

/* CC from a 64-bit signed subtraction. */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* CC from a 32-bit logical subtraction. */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

/* CC from a 64-bit logical subtraction. */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* CC from a 32-bit subtract-with-borrow. */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

/* CC from a 64-bit subtract-with-borrow. */
static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* CC from TEST UNDER MASK, 32-bit operand. */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

/* CC from TEST UNDER MASK, 64-bit operand. */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3557
3558 /* ====================================================================== */
3559 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3560 with the TCG register to which we will write. Used in combination with
3561 the "wout" generators, in some cases we need a new temporary, and in
3562 some cases we can write to a TCG global. */
3563
/* Output goes to a freshly allocated temporary (freed by translate_one). */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

/* Output is a pair of freshly allocated temporaries. */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}

/* Output is the global TCG register r1 itself; g_out prevents
   translate_one from freeing it.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}

/* Output is the even/odd global register pair r1/r1+1. */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}
3589
3590 /* ====================================================================== */
3591 /* The "Write OUTput" generators. These generally perform some non-trivial
3592 copy of data to TCG globals, or to main memory. The trivial cases are
3593 generally handled by having a "prep" generator install the TCG global
3594 as the destination of the operation. */
3595
/* Write the full 64-bit result to GPR r1. */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

/* Deposit only the low 8 bits of the result into GPR r1. */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

/* Write the low 32 bits of the result to GPR r1. */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}

/* Write a pair of 32-bit results to the register pair r1/r1+1. */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

/* Split a 64-bit result across the 32-bit register pair: low half to
   r1+1, high half to r1.  Note this clobbers o->out (a temp) with the
   shifted value.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

/* Write a 32-bit (short) FP result to FPR r1. */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}

/* Write a 64-bit (long) FP result to FPR r1. */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
3638
3639 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3640 {
3641 int f1 = get_field(s->fields, r1);
3642 store_freg(f1, o->out);
3643 store_freg((f1 + 2) & 15, o->out2);
3644 }
3645
/* Write the 32-bit result to r1 only when r1 != r2 (no-op move elision). */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

/* Write the short-FP result to FPR r1 only when r1 != r2. */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}

/* Store the low byte of the result at the operand-1 address. */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

/* Store the low halfword at the operand-1 address. */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

/* Store the low word at the operand-1 address. */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

/* Store the full doubleword at the operand-1 address. */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}

/* Store the low word at the operand-2 address (in2 holds the address). */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3684
3685 /* ====================================================================== */
3686 /* The "INput 1" generators. These load the first operand to an insn. */
3687
/* Operand 1 = copy of GPR r1 (fresh temp). */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

/* Operand 1 aliases the global TCG register r1 directly; g_in1 keeps
   translate_one from freeing it.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

/* Operand 1 = GPR r1, sign-extended from 32 bits. */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

/* Operand 1 = GPR r1, zero-extended from 32 bits. */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

/* Operand 1 = the odd register of the pair, GPR r1+1. */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* Operand 1 = GPR r1+1, sign-extended from 32 bits. */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* Operand 1 = GPR r1+1, zero-extended from 32 bits. */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}
3733
3734 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3735 {
3736 /* ??? Specification exception: r1 must be even. */
3737 int r1 = get_field(f, r1);
3738 o->in1 = tcg_temp_new_i64();
3739 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3740 }
3741
/* Operand 1 = copy of GPR r2. */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* Operand 1 = copy of GPR r3. */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

/* Operand 1 aliases the global TCG register r3 directly. */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

/* Operand 1 = GPR r3, sign-extended from 32 bits. */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

/* Operand 1 = GPR r3, zero-extended from 32 bits. */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

/* Operand 1 = short-FP value of FPR r1. */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

/* Operand 1 aliases the global FP register r1 directly. */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}

/* Compute the operand-1 effective address (base b1 + displacement d1,
   no index) into addr1; does not set in1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

/* Operand 1 = zero-extended byte loaded from the operand-1 address. */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

/* Operand 1 = sign-extended halfword from the operand-1 address. */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

/* Operand 1 = zero-extended halfword from the operand-1 address. */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

/* Operand 1 = sign-extended word from the operand-1 address. */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

/* Operand 1 = zero-extended word from the operand-1 address. */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

/* Operand 1 = doubleword loaded from the operand-1 address. */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3827
3828 /* ====================================================================== */
3829 /* The "INput 2" generators. These load the second operand to an insn. */
3830
/* Operand 2 = copy of GPR r2. */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}

/* Operand 2 aliases the global TCG register r2 directly. */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Operand 2 = copy of GPR r2, but only when r2 != 0; otherwise in2 is
   left unset.  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}

/* Operand 2 = GPR r2, sign-extended from 8 bits. */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Operand 2 = GPR r2, zero-extended from 8 bits. */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Operand 2 = GPR r2, sign-extended from 16 bits. */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Operand 2 = GPR r2, zero-extended from 16 bits. */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Operand 2 = copy of GPR r3. */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

/* Operand 2 = GPR r2, sign-extended from 32 bits. */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Operand 2 = GPR r2, zero-extended from 32 bits. */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Operand 2 = short-FP value of FPR r2. */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}

/* Operand 2 aliases the global FP register r2 directly. */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

/* 128-bit extended-FP source: sets BOTH in1 (FPR r2) and in2 (FPR r2+2),
   aliasing the global FP registers.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f2 = get_field(f, r2);
    o->in1 = fregs[f2];
    o->in2 = fregs[(f2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}

/* Operand 2 = effective address base b2 + index x2 (if the format has
   one) + displacement d2.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* Operand 2 = PC-relative address: current PC + 2 * signed i2. */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

/* Operand 2 = shift count for a 32-bit shift (masked to 0..31). */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

/* Operand 2 = shift count for a 64-bit shift (masked to 0..63). */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
3930
/* Operand 2 = zero-extended byte loaded from the operand-2 address
   (the address computed into in2 is overwritten with the loaded value;
   likewise for all the m2/mri2 loaders below).  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

/* Operand 2 = sign-extended halfword from the operand-2 address. */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

/* Operand 2 = sign-extended word from the operand-2 address. */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* Operand 2 = zero-extended word from the operand-2 address. */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* Operand 2 = doubleword from the operand-2 address. */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* Operand 2 = zero-extended halfword from the PC-relative address. */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* Operand 2 = sign-extended word from the PC-relative address. */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* Operand 2 = zero-extended word from the PC-relative address. */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* Operand 2 = doubleword from the PC-relative address. */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* Operand 2 = the immediate field i2 as extracted (already signed or
   unsigned per the format definition).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

/* Operand 2 = low 8 bits of i2, zero-extended. */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

/* Operand 2 = low 16 bits of i2, zero-extended. */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

/* Operand 2 = low 32 bits of i2, zero-extended. */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

/* Operand 2 = 16-bit unsigned i2 shifted left by insn->data bits. */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

/* Operand 2 = 32-bit unsigned i2 shifted left by insn->data bits. */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
4016
4017 /* ====================================================================== */
4018
4019 /* Find opc within the table of insns. This is formulated as a switch
4020 statement so that (1) we get compile-time notice of cut-paste errors
4021 for duplicated opcodes, and (2) the compiler generates the binary
4022 search tree, rather than us having to post-process the table. */
4023
/* X-macro machinery: insn-data.def is expanded three times with
   different definitions of D.  C is the common case with a zero
   data field.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion: an enumerator per insn, used to index insn_info. */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: the DisasInsn descriptor table itself, wiring each
   insn to its in1/in2/prep/wout/cout/op helper functions.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC,                                       \
    .fmt = FMT_##FT,                                  \
    .fac = FAC_##FC,                                  \
    .name = #NM,                                      \
    .help_in1 = in1_##I1,                             \
    .help_in2 = in2_##I2,                             \
    .help_prep = prep_##P,                            \
    .help_wout = wout_##W,                            \
    .help_cout = cout_##CC,                           \
    .help_op = op_##OP,                               \
    .data = D                                         \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
4059
#undef D
/* Third expansion: switch cases mapping the combined 16-bit opcode to
   its insn_info entry; the compiler builds the binary search tree.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Return the descriptor for OPC (major << 8 | minor), or NULL if the
   opcode is not in the table.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4075
4076 /* Extract a field from the insn. The INSN should be left-aligned in
4077 the uint64_t so that we can more easily utilize the big-bit-endian
4078 definitions we extract from the Principals of Operation. */
4079
4080 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4081 {
4082 uint32_t r, m;
4083
4084 if (f->size == 0) {
4085 return;
4086 }
4087
4088 /* Zero extract the field from the insn. */
4089 r = (insn << f->beg) >> (64 - f->size);
4090
4091 /* Sign-extend, or un-swap the field as necessary. */
4092 switch (f->type) {
4093 case 0: /* unsigned */
4094 break;
4095 case 1: /* signed */
4096 assert(f->size <= 32);
4097 m = 1u << (f->size - 1);
4098 r = (r ^ m) - m;
4099 break;
4100 case 2: /* dl+dh split, signed 20 bit. */
4101 r = ((int8_t)r << 12) | (r >> 8);
4102 break;
4103 default:
4104 abort();
4105 }
4106
4107 /* Validate that the "compressed" encoding we selected above is valid.
4108 I.e. we havn't make two different original fields overlap. */
4109 assert(((o->presentC >> f->indexC) & 1) == 0);
4110 o->presentC |= 1 << f->indexC;
4111 o->presentO |= 1 << f->indexO;
4112
4113 o->c[f->indexC] = r;
4114 }
4115
4116 /* Lookup the insn at the current PC, extracting the operands into O and
4117 returning the info struct for the insn. Returns NULL for invalid insn. */
4118
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for an invalid
   (unrecognized) insn.  Also sets s->next_pc from the insn length.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes determine the major opcode and insn length. */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the 64-bit INSN, as extract_field
       expects.  (Assumes ld_code4 returns a 64-bit value; a 32-bit
       return would make the << 32 shifts UB — confirm.)  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte. */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the 4-bit field after the register. */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* SS formats have no secondary opcode. */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4204
/* Translate the single insn at s->pc: decode it, run its helper
   pipeline (in1 / in2 / prep / op / wout / cout), free the operand
   temporaries, and advance s->pc.  Falls back to the legacy
   interpreter for opcodes not yet in the new table.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter. This includes ILLOPC. */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy DISAS_* result onto the new ExitStatus codes. */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the strutures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; a NULL hook
       in the insn table means that stage does nothing.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       operands that alias TCG globals and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4289
/* Main translation loop: translate insns starting at TB->pc until a
   branch/exception ends the block, a page boundary is crossed, the op
   buffer fills, or the insn budget is exhausted.  With SEARCH_PC set,
   also record per-insn metadata (pc, cc_op, icount) for PC search.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Zero-fill any op slots skipped since the last insn, then
               record this insn's restore metadata.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this pc ends the TB before the insn so the
           debug exception is raised on re-entry.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue appropriate to how translation ended. */
    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the metadata slots after the last insn. */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4428
/* Translate a TB without recording PC-search metadata. */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
4433
/* Translate a TB while recording PC-search metadata (used by
   restore_state_to_opc after a fault inside the TB).  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4438
4439 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4440 {
4441 int cc_op;
4442 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4443 cc_op = gen_opc_cc_op[pc_pos];
4444 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4445 env->cc_op = cc_op;
4446 }
4447 }