]> git.proxmox.com Git - qemu.git/blob - target-s390x/translate.c
target-s390: Convert STCM
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
/* Per-translation-block disassembly state threaded through translation. */
struct DisasContext {
    struct TranslationBlock *tb;    /* TB currently being translated */
    const DisasInsn *insn;          /* descriptor of the current insn */
    DisasFields *fields;            /* decoded operand fields of insn */
    uint64_t pc, next_pc;           /* guest PC of this insn and the next */
    enum cc_op cc_op;               /* lazily tracked condition-code state */
    bool singlestep_enabled;        /* gdbstub single-step is active */
    int is_jmp;                     /* DISAS_* status; ends the TB when set */
};
60
/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;     /* TCG condition to apply to the operand pair */
    bool is_64;         /* true: operands in u.s64; false: in u.s32 */
    bool g1;            /* first operand is a global; don't free it */
    bool g2;            /* second operand is a global; don't free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
72
73 #define DISAS_EXCP 4
74
75 static void gen_op_calc_cc(DisasContext *s);
76
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
80 #endif
81
/* Log the raw instruction word (verbose-disassembly builds only). */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
86
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
99 {
100 int i;
101
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
108 }
109
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
116 }
117 }
118
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
125 }
126 }
127
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
135 }
136 }
137 #endif
138
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
143 }
144 #endif
145
146 cpu_fprintf(f, "\n");
147 }
148
149 static TCGv_i64 psw_addr;
150 static TCGv_i64 psw_mask;
151
152 static TCGv_i32 cc_op;
153 static TCGv_i64 cc_src;
154 static TCGv_i64 cc_dst;
155 static TCGv_i64 cc_vr;
156
157 static char cpu_reg_names[32][4];
158 static TCGv_i64 regs[16];
159 static TCGv_i64 fregs[16];
160
161 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
/* One-time initialization: create the TCG globals that mirror the
   architectural state (PSW, cc scratch values, r0-r15, f0-f15) and
   register the helper functions. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* Names must outlive the globals; they live in cpu_reg_names[]:
       slots 0-15 are "r0".."r15", slots 16-31 are "f0".."f15". */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
202
203 static inline TCGv_i64 load_reg(int reg)
204 {
205 TCGv_i64 r = tcg_temp_new_i64();
206 tcg_gen_mov_i64(r, regs[reg]);
207 return r;
208 }
209
210 static inline TCGv_i64 load_freg(int reg)
211 {
212 TCGv_i64 r = tcg_temp_new_i64();
213 tcg_gen_mov_i64(r, fregs[reg]);
214 return r;
215 }
216
/* Return a new i32 temp with the short (32-bit) float of FPR REG;
   the value lives in the high half of the 64-bit register. */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    /* On a 32-bit host an i64 global is a register pair; take the high word. */
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    /* Reinterpret the i32 temp as an i64 and shift the high half down. */
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}
227
228 static inline TCGv_i64 load_freg32_i64(int reg)
229 {
230 TCGv_i64 r = tcg_temp_new_i64();
231 tcg_gen_shri_i64(r, fregs[reg], 32);
232 return r;
233 }
234
235 static inline TCGv_i32 load_reg32(int reg)
236 {
237 TCGv_i32 r = tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r, regs[reg]);
239 return r;
240 }
241
242 static inline TCGv_i64 load_reg32_i64(int reg)
243 {
244 TCGv_i64 r = tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r, regs[reg]);
246 return r;
247 }
248
/* Store the full 64-bit value V into GPR REG. */
static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
253
/* Store the full 64-bit value V into FPR REG. */
static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
258
/* Store the i32 value V into the low half of GPR REG. */
static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    /* On a 32-bit host the i64 global is a pair; write the low word. */
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}
269
/* Store the low 32 bits of the i64 value V into the low half of GPR REG. */
static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
275
/* Store the low 32 bits of V into the HIGH half of GPR REG. */
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
280
/* Store the low 16 bits of V into GPR REG. */
static inline void store_reg16(int reg, TCGv_i32 v)
{
    /* 16 bit register writes keep the upper bytes */
#if HOST_LONG_BITS == 32
    tcg_gen_deposit_i32(TCGV_LOW(regs[reg]), TCGV_LOW(regs[reg]), v, 0, 16);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 16);
#endif
}
291
/* Store the i32 short-float V into the high half of FPR REG. */
static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}
302
/* Store the low 32 bits of V into the high (short-float) half of FPR REG. */
static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
307
/* Fetch the low half of a 128-bit helper result from env->retxl. */
static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
312
/* Flush the translation-time PC into the architectural psw.addr. */
static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
318
/* Called before operations that may fault: in softmmu builds, make the
   PC and the lazily-computed cc architecturally visible so exception
   delivery sees consistent state.  No-op for CONFIG_USER_ONLY. */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}
326
/* Fetch a 2-byte instruction unit at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
331
/* Fetch a 4-byte instruction unit at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
336
/* Fetch a 6-byte instruction at PC as a 48-bit value (2 + 4 bytes). */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
341
/* Map the PSW address-space-control bits (cached in tb->flags) to the
   QEMU MMU index: 0 primary, 1 secondary, 2 home space. */
static inline int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Access-register mode is not handled here. */
        tcg_abort();
        break;
    }
}
356
357 static void gen_exception(int excp)
358 {
359 TCGv_i32 tmp = tcg_const_i32(excp);
360 gen_helper_exception(cpu_env, tmp);
361 tcg_temp_free_i32(tmp);
362 }
363
/* Raise program exception CODE: record the code and instruction length
   in env, synchronize PSW and cc, and end the TB with an exception. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length, needed to compute the per-exception PSW. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    gen_op_calc_cc(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);

    /* End TB here. */
    s->is_jmp = DISAS_EXCP;
}
390
/* Raise a PGM_SPECIFICATION program exception for an illegal opcode. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}
395
/* Raise a privileged-operation exception if translating in problem
   state (PSW P bit, cached in tb->flags). */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
402
/* Compute the effective address base(B2) + index(X2) + displacement D2
   into a new temporary, honoring 31-bit addressing mode.  Register 0
   as base or index means "no register" per the architecture. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
440
/* Record a compile-time-constant cc VAL (0-3) via the CC_OP_CONST* ops. */
static void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}
445
446 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
447 {
448 tcg_gen_discard_i64(cc_src);
449 tcg_gen_mov_i64(cc_dst, dst);
450 tcg_gen_discard_i64(cc_vr);
451 s->cc_op = op;
452 }
453
454 static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
455 {
456 tcg_gen_discard_i64(cc_src);
457 tcg_gen_extu_i32_i64(cc_dst, dst);
458 tcg_gen_discard_i64(cc_vr);
459 s->cc_op = op;
460 }
461
462 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
463 TCGv_i64 dst)
464 {
465 tcg_gen_mov_i64(cc_src, src);
466 tcg_gen_mov_i64(cc_dst, dst);
467 tcg_gen_discard_i64(cc_vr);
468 s->cc_op = op;
469 }
470
471 static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
472 TCGv_i32 dst)
473 {
474 tcg_gen_extu_i32_i64(cc_src, src);
475 tcg_gen_extu_i32_i64(cc_dst, dst);
476 tcg_gen_discard_i64(cc_vr);
477 s->cc_op = op;
478 }
479
480 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
481 TCGv_i64 dst, TCGv_i64 vr)
482 {
483 tcg_gen_mov_i64(cc_src, src);
484 tcg_gen_mov_i64(cc_dst, dst);
485 tcg_gen_mov_i64(cc_vr, vr);
486 s->cc_op = op;
487 }
488
/* cc from zero/non-zero test of a 32-bit value. */
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}
493
/* cc from zero/non-zero test of a 64-bit value. */
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
498
/* Lazy 32-bit compare of V1 against V2 under condition-op COND. */
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}
504
/* Lazy 64-bit compare of V1 against V2 under condition-op COND. */
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}
510
/* Signed 32-bit compare. */
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}
515
/* Unsigned 32-bit compare. */
static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}
520
/* Signed 32-bit compare against an immediate. */
static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
{
    /* XXX optimize for the constant? put it in s? */
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
    tcg_temp_free_i32(tmp);
}
528
/* Unsigned 32-bit compare against an immediate. */
static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
{
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
    tcg_temp_free_i32(tmp);
}
535
/* Signed 64-bit compare. */
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}
540
/* Unsigned 64-bit compare. */
static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}
545
/* Signed 64-bit compare against an immediate. */
static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_s64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}
552
/* Unsigned 64-bit compare against an immediate. */
static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_u64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}
559
/* cc from signed comparison of a 32-bit value against zero. */
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}
564
/* cc from signed comparison of a 64-bit value against zero. */
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}
569
/* Lazy short-float compare: V1 is a 32-bit float image, V2 a 64-bit one
   (only its low 32 bits are meaningful to CC_OP_LTGT_F32's helper). */
static void set_cc_cmp_f32_i64(DisasContext *s, TCGv_i32 v1, TCGv_i64 v2)
{
    tcg_gen_extu_i32_i64(cc_src, v1);
    tcg_gen_mov_i64(cc_dst, v2);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_LTGT_F32;
}
577
/* cc from zero/non-zero classification of a short-float result. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i32 v1)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ_F32, v1);
}
582
/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    /* The final cc lives in the cc_op global; the lazy inputs are dead. */
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}
591
/* Flush the translation-time cc_op into the cc_op global, except when
   it is dynamic/static (then the global is already authoritative). */
static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
598
/* Synchronize the cc_op global before ending or leaving a TB. */
static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
603
/* Materialize the lazily-tracked condition code into the cc_op global,
   dispatching on how many inputs the current cc_op consumes. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_LTGT_F32:
    case CC_OP_LTGT_F64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
678
/* Decode an RR-format instruction: two 4-bit register fields. */
static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}
686
/* Decode an RX-format instruction and return the computed effective
   address (caller frees the returned temporary). */
static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;

    return get_address(s, *x2, *b2, *d2);
}
699
/* Decode an RS-format instruction (no address computed here). */
static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}
711
/* Decode an SI-format instruction and return the effective address
   (no index register; caller frees the returned temporary). */
static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}
723
/* Decide whether a direct TB-to-TB link to DEST is permitted:
   the destination must share a page with the TB (chaining is only
   safe within a page), and neither single-stepping nor an I/O-ending
   TB may use chaining. */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}
732
/* Emit a jump to guest address PC, using a chained goto_tb (slot
   TB_NUM) when allowed, otherwise a plain exit. */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    /* cc_op must be in sync before leaving the TB. */
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        /* The (tb | tb_num) cookie identifies which jump slot to patch. */
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}
747
/* Statistics: count a branch that needed the out-of-line cc helper. */
static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
754
/* Statistics: count a branch that was folded to an inline comparison. */
static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
761
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.
   Indexed by the 4-bit branch mask (8=EQ, 4=LT, 2=GT, 1=unused/ovfl);
   TCG_COND_NEVER entries fall back to the dynamic path. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
775
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible, so the low two mask
   bits are don't-cares. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
788
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the tracked
   cc_op allows, the branch is folded into a direct comparison of the
   lazy cc inputs; otherwise the cc is materialized and compared. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Unconditional (mask 15) and never-taken (mask 0) short-circuit;
       cc_op is used as a harmless dummy global for both operands. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: only all-zero (cc 0) and its complement fold. */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (value & mask) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The cc is a concrete 0-3 value in the cc_op global; translate
           each useful mask into a comparison on that value. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
996
997 static void free_compare(DisasCompare *c)
998 {
999 if (!c->g1) {
1000 if (c->is_64) {
1001 tcg_temp_free_i64(c->u.s64.a);
1002 } else {
1003 tcg_temp_free_i32(c->u.s32.a);
1004 }
1005 }
1006 if (!c->g2) {
1007 if (c->is_64) {
1008 tcg_temp_free_i64(c->u.s64.b);
1009 } else {
1010 tcg_temp_free_i32(c->u.s32.b);
1011 }
1012 }
1013 }
1014
/* Translate the legacy-decoded 0xE3-prefixed RXY instructions. */
static void disas_e3(CPUS390XState *env, DisasContext* s, int op, int r1,
                     int x2, int b2, int d2)
{
    TCGv_i64 addr, tmp2;
    TCGv_i32 tmp32_1;

    LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
              op, r1, x2, b2, d2);
    addr = get_address(s, x2, b2, d2);
    switch (op) {
    case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
        /* Load Reversed: byte-swapped 64-bit load. */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
        tcg_gen_bswap64_i64(tmp2, tmp2);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
        /* Load 31 bits (Thirty-One): mask off the top bit of the word. */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_andi_i64(tmp2, tmp2, 0x7fffffffULL);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
        /* Load Reversed: byte-swapped 32-bit load into the low half. */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
        /* Load Reversed Halfword into the low 16 bits. */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap16_i32(tmp32_1, tmp32_1);
        store_reg16(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
        /* Store Reversed: byte-swap the low word, then store it. */
        tmp32_1 = load_reg32(r1);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        break;
    default:
        LOG_DISAS("illegal e3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
    tcg_temp_free_i64(addr);
}
1075
1076 #ifndef CONFIG_USER_ONLY
/* Translate the 0xE5-prefixed SSE instructions (softmmu only). */
static void disas_e5(CPUS390XState *env, DisasContext* s, uint64_t insn)
{
    TCGv_i64 tmp, tmp2;
    int op = (insn >> 32) & 0xff;

    /* Both operands are base+displacement addresses (no index). */
    tmp = get_address(s, 0, (insn >> 28) & 0xf, (insn >> 16) & 0xfff);
    tmp2 = get_address(s, 0, (insn >> 12) & 0xf, insn & 0xfff);

    LOG_DISAS("disas_e5: insn %" PRIx64 "\n", insn);
    switch (op) {
    case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
        /* Test Protection */
        potential_page_fault(s);
        gen_helper_tprot(cc_op, tmp, tmp2);
        set_cc_static(s);
        break;
    default:
        LOG_DISAS("illegal e5 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(tmp2);
}
1102 #endif
1103
/* Translate the legacy-decoded 0xEB-prefixed RSE/RSY instructions. */
static void disas_eb(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int r3, int b2, int d2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2;

    LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
              op, r1, r3, b2, d2);
    switch (op) {
#ifndef CONFIG_USER_ONLY
    case 0x2f: /* LCTLG     R1,R3,D2(B2)     [RSE] */
        /* Load Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_lctlg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x25: /* STCTG     R1,R3,D2(B2)     [RSE] */
        /* Store Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stctg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
#endif
    default:
        LOG_DISAS("illegal eb operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
1145
1146 static void disas_ed(CPUS390XState *env, DisasContext *s, int op, int r1,
1147 int x2, int b2, int d2, int r1b)
1148 {
1149 TCGv_i32 tmp_r1, tmp32;
1150 TCGv_i64 addr, tmp;
1151 addr = get_address(s, x2, b2, d2);
1152 tmp_r1 = tcg_const_i32(r1);
1153 switch (op) {
1154 case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
1155 potential_page_fault(s);
1156 gen_helper_ldeb(cpu_env, tmp_r1, addr);
1157 break;
1158 case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
1159 potential_page_fault(s);
1160 gen_helper_lxdb(cpu_env, tmp_r1, addr);
1161 break;
1162 case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
1163 tmp = tcg_temp_new_i64();
1164 tmp32 = load_freg32(r1);
1165 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1166 set_cc_cmp_f32_i64(s, tmp32, tmp);
1167 tcg_temp_free_i64(tmp);
1168 tcg_temp_free_i32(tmp32);
1169 break;
1170 case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
1171 tmp = tcg_temp_new_i64();
1172 tmp32 = tcg_temp_new_i32();
1173 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1174 tcg_gen_trunc_i64_i32(tmp32, tmp);
1175 gen_helper_aeb(cpu_env, tmp_r1, tmp32);
1176 tcg_temp_free_i64(tmp);
1177 tcg_temp_free_i32(tmp32);
1178
1179 tmp32 = load_freg32(r1);
1180 gen_set_cc_nz_f32(s, tmp32);
1181 tcg_temp_free_i32(tmp32);
1182 break;
1183 case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
1184 tmp = tcg_temp_new_i64();
1185 tmp32 = tcg_temp_new_i32();
1186 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1187 tcg_gen_trunc_i64_i32(tmp32, tmp);
1188 gen_helper_seb(cpu_env, tmp_r1, tmp32);
1189 tcg_temp_free_i64(tmp);
1190 tcg_temp_free_i32(tmp32);
1191
1192 tmp32 = load_freg32(r1);
1193 gen_set_cc_nz_f32(s, tmp32);
1194 tcg_temp_free_i32(tmp32);
1195 break;
1196 case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
1197 tmp = tcg_temp_new_i64();
1198 tmp32 = tcg_temp_new_i32();
1199 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1200 tcg_gen_trunc_i64_i32(tmp32, tmp);
1201 gen_helper_deb(cpu_env, tmp_r1, tmp32);
1202 tcg_temp_free_i64(tmp);
1203 tcg_temp_free_i32(tmp32);
1204 break;
1205 case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
1206 potential_page_fault(s);
1207 gen_helper_tceb(cc_op, cpu_env, tmp_r1, addr);
1208 set_cc_static(s);
1209 break;
1210 case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
1211 potential_page_fault(s);
1212 gen_helper_tcdb(cc_op, cpu_env, tmp_r1, addr);
1213 set_cc_static(s);
1214 break;
1215 case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
1216 potential_page_fault(s);
1217 gen_helper_tcxb(cc_op, cpu_env, tmp_r1, addr);
1218 set_cc_static(s);
1219 break;
1220 case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
1221 tmp = tcg_temp_new_i64();
1222 tmp32 = tcg_temp_new_i32();
1223 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1224 tcg_gen_trunc_i64_i32(tmp32, tmp);
1225 gen_helper_meeb(cpu_env, tmp_r1, tmp32);
1226 tcg_temp_free_i64(tmp);
1227 tcg_temp_free_i32(tmp32);
1228 break;
1229 case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
1230 potential_page_fault(s);
1231 gen_helper_cdb(cc_op, cpu_env, tmp_r1, addr);
1232 set_cc_static(s);
1233 break;
1234 case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
1235 potential_page_fault(s);
1236 gen_helper_adb(cc_op, cpu_env, tmp_r1, addr);
1237 set_cc_static(s);
1238 break;
1239 case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
1240 potential_page_fault(s);
1241 gen_helper_sdb(cc_op, cpu_env, tmp_r1, addr);
1242 set_cc_static(s);
1243 break;
1244 case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
1245 potential_page_fault(s);
1246 gen_helper_mdb(cpu_env, tmp_r1, addr);
1247 break;
1248 case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
1249 potential_page_fault(s);
1250 gen_helper_ddb(cpu_env, tmp_r1, addr);
1251 break;
1252 case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
1253 /* for RXF insns, r1 is R3 and r1b is R1 */
1254 tmp32 = tcg_const_i32(r1b);
1255 potential_page_fault(s);
1256 gen_helper_madb(cpu_env, tmp32, addr, tmp_r1);
1257 tcg_temp_free_i32(tmp32);
1258 break;
1259 default:
1260 LOG_DISAS("illegal ed operation 0x%x\n", op);
1261 gen_illegal_opcode(s);
1262 return;
1263 }
1264 tcg_temp_free_i32(tmp_r1);
1265 tcg_temp_free_i64(addr);
1266 }
1267
/* Translate one 0xB2-opcode instruction.  OP is the secondary opcode
   byte; INSN is the raw 4-byte instruction, from which RRE register
   fields (and, for S-format system insns, B2/D2) are re-decoded.
   System-only instructions are grouped under !CONFIG_USER_ONLY and
   check the privileged state first.  */
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
                     uint32_t insn)
{
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    int r1, r2;
#ifndef CONFIG_USER_ONLY
    int r3, d2, b2;
#endif

    r1 = (insn >> 4) & 0xf;
    r2 = insn & 0xf;

    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);

    switch (op) {
    case 0x22: /* IPM R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        /* The helper consumes the materialized cc, so compute it now.  */
        gen_op_calc_cc(s);
        gen_helper_ipm(cpu_env, cc_op, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x41: /* CKSM R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        gen_op_movi_cc(s, 0);
        break;
    case 0x4e: /* SAR R1,R2 [RRE] */
        tmp32_1 = load_reg32(r2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x4f: /* EAR R1,R2 [RRE] */
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x54: /* MVPG R1,R2 [RRE] */
        tmp = load_reg(0);
        tmp2 = load_reg(r1);
        tmp3 = load_reg(r2);
        potential_page_fault(s);
        gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        /* XXX check CCO bit and set CC accordingly */
        gen_op_movi_cc(s, 0);
        break;
    case 0x55: /* MVST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        gen_op_movi_cc(s, 1);
        break;
    case 0x5d: /* CLST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x5e: /* SRST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;

#ifndef CONFIG_USER_ONLY
    case 0x02: /* STIDP D2(B2) [S] */
        /* Store CPU ID */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stidp(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x04: /* SCK D2(B2) [S] */
        /* Set Clock */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sck(cc_op, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x05: /* STCK D2(B2) [S] */
        /* Store Clock */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stck(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x06: /* SCKC D2(B2) [S] */
        /* Set Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x07: /* STCKC D2(B2) [S] */
        /* Store Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x08: /* SPT D2(B2) [S] */
        /* Set CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x09: /* STPT D2(B2) [S] */
        /* Store CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stpt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0a: /* SPKA D2(B2) [S] */
        /* Set PSW Key from Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        /* Replace only the key field of the PSW mask, inline.  */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
        tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
        tcg_gen_or_i64(psw_mask, tmp2, tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0d: /* PTLB [S] */
        /* Purge TLB */
        check_privileged(s);
        gen_helper_ptlb(cpu_env);
        break;
    case 0x10: /* SPX D2(B2) [S] */
        /* Set Prefix Register */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spx(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x11: /* STPX D2(B2) [S] */
        /* Store Prefix */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x12: /* STAP D2(B2) [S] */
        /* Store CPU Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x21: /* IPTE R1,R2 [RRE] */
        /* Invalidate PTE */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r1);
        tmp2 = load_reg(r2);
        gen_helper_ipte(cpu_env, tmp, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x29: /* ISKE R1,R2 [RRE] */
        /* Insert Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r2);
        tmp2 = tcg_temp_new_i64();
        gen_helper_iske(tmp2, cpu_env, tmp);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Reset Reference Bit Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x2b: /* SSKE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_sske(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x34: /* STCH ? */
        /* Store Subchannel */
        check_privileged(s);
        /* Not implemented: report "not operational" via cc 3.  */
        gen_op_movi_cc(s, 3);
        break;
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call */
        check_privileged(s);
        /* Not implemented: report "not operational" via cc 3.  */
        gen_op_movi_cc(s, 3);
        break;
    case 0x78: /* STCKE D2(B2) [S] */
        /* Store Clock Extended */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x79: /* SACF D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->pc = s->next_pc;
        update_psw_addr(s);
        s->is_jmp = DISAS_JUMP;
        break;
    case 0x7d: /* STSI D2,(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x9d: /* LFPC D2(B2) [S] */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0xb1: /* STFL D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        /* Fixed facility-bits word, stored at low-core offset 200.  */
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0xb2: /* LPSWE D2(B2) [S] */
        /* Load PSW Extended */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        /* Load the 16-byte PSW: mask, then address.  */
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        break;
    case 0x20: /* SERVC R1,R2 [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        tmp = load_reg(r1);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
#endif
    default:
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
1643
/* Translate one 0xB3-opcode instruction (register-to-register BFP
   arithmetic).  OP is the secondary opcode byte; M3/R1/R2 are the
   decoded register/mask fields.  The FP_HELPER[_CC] macros below wrap
   the common "call helper on (r1, r2) [and set cc]" pattern; they are
   local to this function and #undef'd at the end.  */
static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
                     int r1, int r2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
#define FP_HELPER(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

#define FP_HELPER_CC(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
    set_cc_static(s); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

    switch (op) {
    case 0x0: /* LPEBR R1,R2 [RRE] */
        FP_HELPER_CC(lpebr);
        break;
    case 0x2: /* LTEBR R1,R2 [RRE] */
        FP_HELPER_CC(ltebr);
        break;
    case 0x3: /* LCEBR R1,R2 [RRE] */
        FP_HELPER_CC(lcebr);
        break;
    case 0x4: /* LDEBR R1,R2 [RRE] */
        FP_HELPER(ldebr);
        break;
    case 0x5: /* LXDBR R1,R2 [RRE] */
        FP_HELPER(lxdbr);
        break;
    case 0x9: /* CEBR R1,R2 [RRE] */
        FP_HELPER_CC(cebr);
        break;
    case 0xa: /* AEBR R1,R2 [RRE] */
        FP_HELPER_CC(aebr);
        break;
    case 0xb: /* SEBR R1,R2 [RRE] */
        FP_HELPER_CC(sebr);
        break;
    case 0xd: /* DEBR R1,R2 [RRE] */
        FP_HELPER(debr);
        break;
    case 0x10: /* LPDBR R1,R2 [RRE] */
        FP_HELPER_CC(lpdbr);
        break;
    case 0x12: /* LTDBR R1,R2 [RRE] */
        FP_HELPER_CC(ltdbr);
        break;
    case 0x13: /* LCDBR R1,R2 [RRE] */
        FP_HELPER_CC(lcdbr);
        break;
    case 0x15: /* SQDBR R1,R2 [RRE] */
        FP_HELPER(sqdbr);
        break;
    case 0x17: /* MEEBR R1,R2 [RRE] */
        FP_HELPER(meebr);
        break;
    case 0x19: /* CDBR R1,R2 [RRE] */
        FP_HELPER_CC(cdbr);
        break;
    case 0x1a: /* ADBR R1,R2 [RRE] */
        FP_HELPER_CC(adbr);
        break;
    case 0x1b: /* SDBR R1,R2 [RRE] */
        FP_HELPER_CC(sdbr);
        break;
    case 0x1c: /* MDBR R1,R2 [RRE] */
        FP_HELPER(mdbr);
        break;
    case 0x1d: /* DDBR R1,R2 [RRE] */
        FP_HELPER(ddbr);
        break;
    case 0xe: /* MAEBR R1,R3,R2 [RRF] */
    case 0x1e: /* MADBR R1,R3,R2 [RRF] */
    case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
        /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
        tmp32_1 = tcg_const_i32(m3);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(r1);
        switch (op) {
        case 0xe:
            gen_helper_maebr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        case 0x1e:
            gen_helper_madbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        case 0x1f:
            gen_helper_msdbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x40: /* LPXBR R1,R2 [RRE] */
        FP_HELPER_CC(lpxbr);
        break;
    case 0x42: /* LTXBR R1,R2 [RRE] */
        FP_HELPER_CC(ltxbr);
        break;
    case 0x43: /* LCXBR R1,R2 [RRE] */
        FP_HELPER_CC(lcxbr);
        break;
    case 0x44: /* LEDBR R1,R2 [RRE] */
        FP_HELPER(ledbr);
        break;
    case 0x45: /* LDXBR R1,R2 [RRE] */
        FP_HELPER(ldxbr);
        break;
    case 0x46: /* LEXBR R1,R2 [RRE] */
        FP_HELPER(lexbr);
        break;
    case 0x49: /* CXBR R1,R2 [RRE] */
        FP_HELPER_CC(cxbr);
        break;
    case 0x4a: /* AXBR R1,R2 [RRE] */
        FP_HELPER_CC(axbr);
        break;
    case 0x4b: /* SXBR R1,R2 [RRE] */
        FP_HELPER_CC(sxbr);
        break;
    case 0x4c: /* MXBR R1,R2 [RRE] */
        FP_HELPER(mxbr);
        break;
    case 0x4d: /* DXBR R1,R2 [RRE] */
        FP_HELPER(dxbr);
        break;
    case 0x65: /* LXR R1,R2 [RRE] */
        /* 128-bit value lives in an even/odd fp register pair.  */
        tmp = load_freg(r2);
        store_freg(r1, tmp);
        tcg_temp_free_i64(tmp);
        tmp = load_freg(r2 + 2);
        store_freg(r1 + 2, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x74: /* LZER R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzer(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x75: /* LZDR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzdr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x76: /* LZXR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzxr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x84: /* SFPC R1 [RRE] */
        tmp32_1 = load_reg32(r1);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x94: /* CEFBR R1,R2 [RRE] */
    case 0x95: /* CDFBR R1,R2 [RRE] */
    case 0x96: /* CXFBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = load_reg32(r2);
        switch (op) {
        case 0x94:
            gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
            break;
        case 0x95:
            gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
            break;
        case 0x96:
            gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x98: /* CFEBR R1,R2 [RRE] */
    case 0x99: /* CFDBR R1,R2 [RRE] */
    case 0x9a: /* CFXBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        switch (op) {
        case 0x98:
            gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        case 0x99:
            gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        case 0x9a:
            gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        default:
            tcg_abort();
        }
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xa4: /* CEGBR R1,R2 [RRE] */
    case 0xa5: /* CDGBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp = load_reg(r2);
        switch (op) {
        case 0xa4:
            gen_helper_cegbr(cpu_env, tmp32_1, tmp);
            break;
        case 0xa5:
            gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0xa6: /* CXGBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp = load_reg(r2);
        gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0xa8: /* CGEBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xa9: /* CGDBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xaa: /* CGXBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    default:
        LOG_DISAS("illegal b3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }

#undef FP_HELPER_CC
#undef FP_HELPER
}
1916
/* Translate one 0xB9-opcode instruction (RRE format) not yet converted
   to the table-driven decoder.  OP is the secondary opcode byte.  */
static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int r2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1;

    LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
    switch (op) {
    case 0x17: /* LLGTR R1,R2 [RRE] */
        /* Zero-extend the low 31 bits of r2 into r1.  */
        tmp32_1 = load_reg32(r2);
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i32(tmp32_1, tmp32_1, 0x7fffffffUL);
        tcg_gen_extu_i32_i64(tmp, tmp32_1);
        store_reg(r1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0f: /* LRVGR R1,R2 [RRE] */
        /* Byte-reverse directly between the global register TCGvs.  */
        tcg_gen_bswap64_i64(regs[r1], regs[r2]);
        break;
    case 0x1f: /* LRVR R1,R2 [RRE] */
        tmp32_1 = load_reg32(r2);
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x83: /* FLOGR R1,R2 [RRE] */
        tmp = load_reg(r2);
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        break;
    default:
        LOG_DISAS("illegal b9 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
1957
/* Top-level dispatch for the legacy (non-table-driven) decoder: read the
   opcode byte at s->pc, decode the format-specific fields, and hand off
   to the per-opcode-group disas_* routine.  Unknown opcodes raise an
   illegal-operation exception.  */
static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
{
    unsigned char opc;
    uint64_t insn;
    int op, r1, r2, r3, d2, x2, b2, r1b;

    opc = cpu_ldub_code(env, s->pc);
    LOG_DISAS("opc 0x%x\n", opc);

    switch (opc) {
    case 0xb2:
        insn = ld_code4(env, s->pc);
        op = (insn >> 16) & 0xff;
        disas_b2(env, s, op, insn);
        break;
    case 0xb3:
        insn = ld_code4(env, s->pc);
        op = (insn >> 16) & 0xff;
        r3 = (insn >> 12) & 0xf; /* aka m3 */
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        disas_b3(env, s, op, r3, r1, r2);
        break;
    case 0xb9:
        insn = ld_code4(env, s->pc);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        op = (insn >> 16) & 0xff;
        disas_b9(env, s, op, r1, r2);
        break;
    case 0xe3:
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        op = insn & 0xff;
        r1 = (insn >> 36) & 0xf;
        x2 = (insn >> 32) & 0xf;
        b2 = (insn >> 28) & 0xf;
        /* Reassemble the split 20-bit displacement (DL low 12 bits,
           DH high 8 bits) and sign-extend it via the shift pair.  */
        d2 = ((int)((((insn >> 16) & 0xfff)
           | ((insn << 4) & 0xff000)) << 12)) >> 12;
        disas_e3(env, s, op, r1, x2, b2, d2 );
        break;
#ifndef CONFIG_USER_ONLY
    case 0xe5:
        /* Test Protection */
        check_privileged(s);
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        disas_e5(env, s, insn);
        break;
#endif
    case 0xeb:
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        op = insn & 0xff;
        r1 = (insn >> 36) & 0xf;
        r3 = (insn >> 32) & 0xf;
        b2 = (insn >> 28) & 0xf;
        /* Same 20-bit signed displacement reassembly as the 0xe3 case.  */
        d2 = ((int)((((insn >> 16) & 0xfff)
           | ((insn << 4) & 0xff000)) << 12)) >> 12;
        disas_eb(env, s, op, r1, r3, b2, d2);
        break;
    case 0xed:
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        op = insn & 0xff;
        r1 = (insn >> 36) & 0xf;
        x2 = (insn >> 32) & 0xf;
        b2 = (insn >> 28) & 0xf;
        /* RXE/RXF have only a 12-bit unsigned displacement.  */
        d2 = (short)((insn >> 16) & 0xfff);
        r1b = (insn >> 12) & 0xf;
        disas_ed(env, s, op, r1, x2, b2, d2, r1b);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
        gen_illegal_opcode(s);
        break;
    }
}
2036
2037 /* ====================================================================== */
2038 /* Define the insn format enumeration. */
2039 #define F0(N) FMT_##N,
2040 #define F1(N, X1) F0(N)
2041 #define F2(N, X1, X2) F0(N)
2042 #define F3(N, X1, X2, X3) F0(N)
2043 #define F4(N, X1, X2, X3, X4) F0(N)
2044 #define F5(N, X1, X2, X3, X4, X5) F0(N)
2045
2046 typedef enum {
2047 #include "insn-format.def"
2048 } DisasFormat;
2049
2050 #undef F0
2051 #undef F1
2052 #undef F2
2053 #undef F3
2054 #undef F4
2055 #undef F5
2056
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* "Original" field indices: one distinct value per named instruction
   field.  Used as bit positions in DisasFields.presentO.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
2085
/* "Compact" field indices: fields that never appear together in one
   instruction format share a slot in DisasFields.c[], so only
   NUM_C_FIELD slots are needed.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
2116
/* The decoded fields of one instruction.  */
struct DisasFields {
    unsigned op:8;           /* primary opcode byte */
    unsigned op2:8;          /* secondary opcode byte */
    unsigned presentC:16;    /* bitmap over compact indices in use */
    unsigned int presentO;   /* bitmap over original field indices */
    int c[NUM_C_FIELD];      /* field values, indexed by DisasFieldIndexC */
};
2124
/* This is the way fields are to be accessed out of DisasFields.  F is a
   bare field name (r1, d2, ...); the macros tack on the FLD_O_/FLD_C_
   prefixes so call sites stay readable.  */
#define have_field(S, F) have_field1((S), FLD_O_##F)
#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
2128
2129 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
2130 {
2131 return (f->presentO >> c) & 1;
2132 }
2133
/* Fetch the value of field O from its compact slot C.  Reading a field
   that the instruction format did not decode is a decoder bug, hence
   the assert.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
2140
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;      /* first bit of the field within the insn */
    unsigned int size:8;     /* field width in bits */
    unsigned int type:2;     /* extraction class: 0 for register/plain
                                fields, 1 for immediates, 2 for 20-bit
                                long displacements (per the R/I/BDL
                                macros below) */
    unsigned int indexC:6;   /* compact storage slot */
    enum DisasFieldIndexO indexO:8;  /* original field identity */
} DisasField;
2149
/* Per-format list of field descriptions; unused trailing slots are
   zero-initialized.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
2153
/* Field-description builders used to expand insn-format.def into
   format_info[].  Each expands to one or more DisasField initializers:
   { start bit, width, extraction type, compact index, original index }.  */
#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* One entry per format, indexed by the DisasFormat enumeration.  */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

/* All builders above are only needed for the table expansion.  */
#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
2194
/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    /* The g_* flags mark values that alias a global (e.g. a CPU register
       TCGv) and therefore must not be freed by the common cleanup.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
2203
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
2221
/* Architectural facility an instruction belongs to; used to gate decode
   on the features the emulated CPU model provides.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
2244
/* Static description of one instruction in the table-driven decoder.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode (including secondary byte) */
    DisasFormat fmt:6;      /* instruction encoding format */
    DisasFacility fac:6;    /* required architectural facility */

    const char *name;       /* mnemonic, for logging */

    /* Optional translation phases: load inputs, prepare the output,
       write back, compute the condition code.  Any may be unset.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* per-insn constant passed via s->insn */
};
2261
2262 /* ====================================================================== */
2263 /* Miscelaneous helpers, used by several operations. */
2264
/* Compute the shift-count operand into o->in2.  With no base register
   the count is the displacement itself; otherwise it is the computed
   address.  Either way it is masked to the valid shift width.  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        /* No base register: the count is a compile-time constant.  */
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
2278
/* Emit an unconditional branch to DEST.  A branch to the fallthrough
   address is a no-op; otherwise chain with goto_tb when use_goto_tb()
   permits, else just update psw_addr and exit the TB.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        /* cc_op must be in canonical form before leaving the TB.  */
        gen_update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
2295
/* Emit a conditional branch.  C describes the condition to branch on;
   the destination is either PC-relative (is_imm, halfword offset IMM)
   or the register/computed value CDEST.  Handles the degenerate cases
   (never/always taken, branch-to-next), then picks the best code shape
   depending on whether goto_tb chaining is available for each exit.
   Consumes (frees) *c in all paths.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result to 64 bits so a single
               64-bit movcond can select the new PC.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
2420
2421 /* ====================================================================== */
2422 /* The operations. These perform the bulk of the work for any insn,
2423 usually after the operands have been loaded and output initialized. */
2424
/* LOAD POSITIVE: out = |in2|.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}
2430
/* ADD: out = in1 + in2.  CC, if any, is set by the insn's cout hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2436
/* ADD WITH CARRY: out = in1 + in2 + carry-in, where the incoming carry
   is recovered from the previously computed condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* The carry is bit 1 of the logical-add CC value (2 or 3).  */
    tcg_gen_shri_i64(cc, cc, 1);

    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
2453
/* AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2459
/* AND IMMEDIATE into a sub-field of the register (NIHH etc.): insn->data
   packs the field size (high byte) and shift (low byte).  Bits outside
   the field pass through from in1 unchanged.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Position the immediate, then set all bits outside the field so
       the AND only affects the selected field.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2476
/* BRANCH AND SAVE: store the link information, then branch to the
   address in in2 (unless the register operand was r0, in which case
   in2 is unused and no branch occurs).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
2487
/* BRANCH AND SAVE with a PC-relative (halfword) immediate target.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
2493
/* BRANCH ON CONDITION: evaluate mask m1 against the CC, branching
   either PC-relative (immediate form) or to the address in in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
2504
/* BRANCH ON COUNT (32-bit): decrement the low word of r1 and branch
   if the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write back only the low 32 bits of r1.  */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
2528
/* BRANCH ON COUNT (64-bit): decrement r1 and branch if non-zero.
   Compares the global register directly (g1 = true).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
2547
/* COMPARE LOGICAL (storage-to-storage), length l+1 bytes.  Power-of-two
   lengths up to 8 are inlined as a pair of loads plus an unsigned
   compare; everything else goes through the helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Odd lengths: byte-by-byte comparison in the helper, which
           may fault mid-operation.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
2581
/* COMPARE LOGICAL LONG EXTENDED: entirely in the helper; CC is static
   afterwards.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2593
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the bytes of
   the (32-bit) first operand selected by m3 against storage.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2606
/* COMPARE AND SWAP (32-bit), via helper; CC becomes static.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
2615
/* COMPARE AND SWAP (64-bit), via helper; CC becomes static.  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
2624
/* COMPARE DOUBLE AND SWAP: implemented by reusing the 64-bit CS helper
   on a value assembled from the even/odd register pair r3:r3+1
   (r3 supplies the high word).  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}
2636
/* COMPARE DOUBLE AND SWAP (128-bit), entirely in the helper.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
2647
/* CONVERT TO DECIMAL: helper converts the 32-bit first operand to a
   packed-decimal doubleword, which is then stored at the second-operand
   address.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2659
2660 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): passed to the helper with the function code
   from the displacement field and r1/r2 implicitly in regs[2]/regs[1].  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
2674 #endif
2675
/* DIVIDE (32-bit signed): helper returns remainder/quotient as a pair;
   the second half arrives via return_low128.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2682
/* DIVIDE LOGICAL (32-bit unsigned); see op_divs32 for the result
   convention.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2689
/* DIVIDE SINGLE (64-bit signed).  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2696
/* DIVIDE LOGICAL (64-bit): note the argument pattern differs from
   op_divs64 — the 128-bit dividend is passed in o->out/o->out2,
   presumably loaded there by the insn's prep hook (outside this
   view) — TODO confirm against the decode table.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2703
/* EXTRACT FPC: read the floating-point control register into out.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2709
/* EXECUTE: run the single instruction at the second-operand address,
   with its second byte OR'd from the low byte of r1.  Implemented
   entirely in the helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    /* The helper needs an up-to-date PSW and CC before it runs the
       target instruction.  */
    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2735
2736 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2737 {
2738 int m3 = get_field(s->fields, m3);
2739 int pos, len, base = s->insn->data;
2740 TCGv_i64 tmp = tcg_temp_new_i64();
2741 uint64_t ccm;
2742
2743 switch (m3) {
2744 case 0xf:
2745 /* Effectively a 32-bit load. */
2746 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2747 len = 32;
2748 goto one_insert;
2749
2750 case 0xc:
2751 case 0x6:
2752 case 0x3:
2753 /* Effectively a 16-bit load. */
2754 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2755 len = 16;
2756 goto one_insert;
2757
2758 case 0x8:
2759 case 0x4:
2760 case 0x2:
2761 case 0x1:
2762 /* Effectively an 8-bit load. */
2763 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2764 len = 8;
2765 goto one_insert;
2766
2767 one_insert:
2768 pos = base + ctz32(m3) * 8;
2769 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2770 ccm = ((1ull << len) - 1) << pos;
2771 break;
2772
2773 default:
2774 /* This is going to be a sequence of loads and inserts. */
2775 pos = base + 32 - 8;
2776 ccm = 0;
2777 while (m3) {
2778 if (m3 & 0x8) {
2779 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2780 tcg_gen_addi_i64(o->in2, o->in2, 1);
2781 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2782 ccm |= 0xff << pos;
2783 }
2784 m3 = (m3 << 1) & 0xf;
2785 pos -= 8;
2786 }
2787 break;
2788 }
2789
2790 tcg_gen_movi_i64(tmp, ccm);
2791 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2792 tcg_temp_free_i64(tmp);
2793 return NO_EXIT;
2794 }
2795
/* Insert an immediate into a sub-field of the register (IIHH etc.):
   insn->data packs the field size (high byte) and shift (low byte).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2803
/* Memory loads of each width/signedness: load from the address in
   o->in2 into o->out.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2845
2846 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (privileged): load control registers r1..r3 from
   storage, via helper.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2858
/* LOAD REAL ADDRESS (privileged): translate the operand address,
   setting the CC according to the translation result.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2867
/* LOAD PSW (privileged): load an 8-byte ESA-format PSW and install it
   via the helper.  The TB ends unconditionally.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2886 #endif
2887
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from storage,
   via helper.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2898
/* LOAD MULTIPLE (32-bit): load successive words into the low halves of
   registers r1..r3, wrapping from 15 to 0.  The loop is unrolled at
   translation time.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2920
/* LOAD MULTIPLE HIGH: like op_lm32, but the loaded words go into the
   high halves of registers r1..r3.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2942
/* LOAD MULTIPLE (64-bit): load successive doublewords directly into
   registers r1..r3, wrapping from 15 to 0.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2961
/* Generic move: transfer ownership of the in2 temp (and its "global"
   flag) to out, so the wout hook stores it.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2970
/* Generic 128-bit move: transfer ownership of both input temps to the
   output pair.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2982
/* MOVE (storage-to-storage), l+1 bytes, via helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
2991
/* MOVE LONG, via helper; CC becomes static.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
3003
/* MOVE LONG EXTENDED, via helper; CC becomes static.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
3015
3016 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY (privileged): note l1 here holds the register number
   carrying the length, per the SS-d format.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3026
/* MOVE TO SECONDARY (privileged); see op_mvcp for the l1 convention.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3036 #endif
3037
/* MULTIPLY: out = in1 * in2.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3043
/* MULTIPLY producing a 128-bit result; the low half arrives via
   return_low128.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3050
/* LOAD NEGATIVE: out = -|in2|.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}
3056
/* AND (storage-to-storage), l+1 bytes, via helper.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3066
/* LOAD COMPLEMENT: out = -in2.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}
3072
/* OR (storage-to-storage), l+1 bytes, via helper.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3082
/* OR: out = in1 | in2.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3088
/* OR IMMEDIATE into a sub-field of the register (OIHH etc.): insn->data
   packs the field size (high byte) and shift (low byte).  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3104
/* ROTATE LEFT SINGLE LOGICAL (32-bit): rotate the low word of in1 by
   in2, zero-extending the result into out.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}
3119
/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3125
3126 #ifndef CONFIG_USER_ONLY
/* SIGNAL PROCESSOR (privileged), via helper.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3136 #endif
3137
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 or 63), selecting the 32- vs 64-bit CC computation.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
3151
/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3157
/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3163
/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3169
3170 #ifndef CONFIG_USER_ONLY
/* SET SYSTEM MASK (privileged): replace the top byte of the PSW mask
   with the byte loaded into in2.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3177
/* STORE CONTROL (privileged): store control registers r1..r3, via
   helper.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3189
/* STORE THEN AND/OR SYSTEM MASK (privileged): op 0xac is STNSM (AND),
   otherwise STOSM (OR).  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into the system-mask byte.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the system-mask byte.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3213 #endif
3214
/* Memory stores of each width: store o->in1 to the address in
   o->in2.  */

static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3238
/* STORE ACCESS MULTIPLE: store access registers r1..r3, via helper.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3249
/* STORE CHARACTERS UNDER MASK: store the register bytes selected by m3
   to successive storage locations.  insn->data is the bit position of
   the source 32-bit field (32 for the STCMH variant).  Contiguous
   masks are handled with a single sized store; arbitrary masks fall
   back to per-byte shifts and stores.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3298
/* STORE MULTIPLE (32- or 64-bit, selected by insn->data): store
   registers r1..r3 to successive storage locations, wrapping from 15
   to 0.  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3322
3323 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3324 {
3325 int r1 = get_field(s->fields, r1);
3326 int r3 = get_field(s->fields, r3);
3327 TCGv_i64 t = tcg_temp_new_i64();
3328 TCGv_i64 t4 = tcg_const_i64(4);
3329 TCGv_i64 t32 = tcg_const_i64(32);
3330
3331 while (1) {
3332 tcg_gen_shl_i64(t, regs[r1], t32);
3333 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3334 if (r1 == r3) {
3335 break;
3336 }
3337 tcg_gen_add_i64(o->in2, o->in2, t4);
3338 r1 = (r1 + 1) & 15;
3339 }
3340
3341 tcg_temp_free_i64(t);
3342 tcg_temp_free_i64(t4);
3343 tcg_temp_free_i64(t32);
3344 return NO_EXIT;
3345 }
3346
/* SUBTRACT: out = in1 - in2.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3352
/* SUBTRACT WITH BORROW: computed as in1 + ~in2 + borrow, with the
   borrow recovered from the previously computed condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* The borrow is bit 1 of the logical-subtract CC value.  */
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3370
/* SUPERVISOR CALL: record the SVC interruption code and instruction
   length, then raise EXCP_SVC.  Ends the TB.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The exception path needs an up-to-date PSW and CC.  */
    update_psw_addr(s);
    gen_op_calc_cc(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3389
3390 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3391 {
3392 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3393 potential_page_fault(s);
3394 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3395 tcg_temp_free_i32(l);
3396 set_cc_static(s);
3397 return NO_EXIT;
3398 }
3399
/* UNPACK, via helper; UNPK does not set the CC.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3408
/* EXCLUSIVE OR (storage-to-storage), l+1 bytes, via helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3418
3419 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3420 {
3421 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3422 return NO_EXIT;
3423 }
3424
/* XIHF/XILF: exclusive-or an immediate into one 32-bit half of a
   64-bit register.  insn->data packs the field geometry: low byte is
   the shift (bit position), next byte the field size in bits.
   NOTE(review): the mask computation assumes size < 64 (true for the
   32-bit immediates these insns use); 1ull << 64 would be UB.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* We shift in2 in place, so it must not be a global. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3440
3441 /* ====================================================================== */
3442 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3443 the original inputs), update the various cc data structures in order to
3444 be able to compute the new condition code. */
3445
/* Each cout helper merely records the CC_OP and the values it depends
   on in the cc_* globals; the condition code itself is computed later,
   only if something actually consumes it.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed add: CC from both inputs and the result (overflow detect). */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* Logical add: CC encodes carry-out and zero/nonzero result. */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* Logical add-with-carry. */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Signed and unsigned compares: CC from the two inputs only. */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* CC_OP_NZ tests the full 64-bit value, so for the 32-bit case the
   result is zero-extended into cc_dst first (compare cout_nz64).  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Sign of the result: less than / equal / greater than zero. */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Signed and logical subtract, and subtract-with-borrow. */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* Test-under-mask: CC from value (in1) and mask (in2). */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3586
3587 /* ====================================================================== */
3588 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3589 with the TCG register to which we will write. Used in combination with
3590 the "wout" generators, in some cases we need a new temporary, and in
3591 some cases we can write to a TCG global. */
3592
/* Allocate a fresh temporary for the single result. */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

/* Allocate fresh temporaries for a register-pair result. */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}

/* Write the result directly into the r1 global; g_out marks it so the
   cleanup in translate_one will not free it.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}

/* Write the result pair directly into globals r1 and r1+1. */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}
3618
3619 /* ====================================================================== */
3620 /* The "Write OUTput" generators. These generally perform some non-trivial
3621 copy of data to TCG globals, or to main memory. The trivial cases are
3622 generally handled by having a "prep" generator install the TCG global
3623 as the destination of the operation. */
3624
/* Store the 64-bit result into r1. */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

/* Insert only the low 8 bits of the result into r1 (e.g. IC). */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

/* Store the low 32 bits of the result into r1, preserving the high. */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}

/* Store the pair out/out2 into the low halves of r1 and r1+1. */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

/* Split a 64-bit result across the r1:r1+1 pair, high half in r1.
   Note this clobbers o->out with the shifted value.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

/* Store a 32-bit float result into f1. */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}

/* Store a 64-bit float result into f1. */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
3667
3668 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3669 {
3670 int f1 = get_field(s->fields, r1);
3671 store_freg(f1, o->out);
3672 store_freg((f1 + 2) & 15, o->out2);
3673 }
3674
/* Conditional store used by LOAD (when r1 == r2 the load is a no-op
   and the store is skipped, e.g. for the COMPARE-AND forms).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

/* Same, for 32-bit float registers. */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}

/* Memory stores of the result through addr1, at various widths. */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}

/* Store through the second-operand address (in2 holds the address). */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3713
3714 /* ====================================================================== */
3715 /* The "INput 1" generators. These load the first operand to an insn. */
3716
/* Load r1 into a fresh temporary. */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

/* Use the r1 global directly ("_o" = original, not a copy); g_in1
   keeps translate_one's cleanup from freeing it.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

/* Sign-extended low 32 bits of r1. */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

/* Zero-extended low 32 bits of r1. */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

/* High 32 bits of r1, shifted down. */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}

/* The odd register of the r1 pair, full width. */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* The odd register of the r1 pair, sign-extended from 32 bits. */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* The odd register of the r1 pair, zero-extended from 32 bits. */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}
3768
3769 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3770 {
3771 /* ??? Specification exception: r1 must be even. */
3772 int r1 = get_field(f, r1);
3773 o->in1 = tcg_temp_new_i64();
3774 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3775 }
3776
/* Load r2 into a fresh temporary. */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* Load r3 into a fresh temporary. */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

/* Use the r3 global directly; not freed by translate_one. */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

/* Sign-extended low 32 bits of r3. */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

/* Zero-extended low 32 bits of r3. */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

/* 32-bit float register f1 (the insn field is still named r1). */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

/* 64-bit float register f1, used directly as a global. */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}

/* Compute the first-operand effective address (base b1 + disp d1,
   no index register) into addr1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

/* Memory loads through the first-operand address, various widths and
   extensions.  Each also leaves the address in o->addr1.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3862
3863 /* ====================================================================== */
3864 /* The "INput 2" generators. These load the second operand to an insn. */
3865
/* Load r2 into a fresh temporary. */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}

/* Use the r2 global directly; not freed by translate_one. */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Load r2 only if nonzero; with r2 == 0, o->in2 stays unset
   (TCGV_UNUSED) and the op must cope with that.  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}

/* Extended sub-fields of r2: 8/16 bits, signed/unsigned. */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Load r3 into a fresh temporary. */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

/* Sign/zero-extended low 32 bits of r2. */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

/* 32-bit float register f2. */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}

/* 64-bit float register f2, used directly as a global. */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

/* 128-bit float: the f2/f2+2 register pair fills both in1 and in2. */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f2 = get_field(f, r2);
    o->in1 = fregs[f2];
    o->in2 = fregs[(f2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}
3944
/* Second-operand effective address: base b2 + disp d2 (+ index x2 if
   the format has one).  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* PC-relative address: current pc + signed halfword offset i2 * 2. */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

/* Shift amounts, masked to the operand width (31 or 63). */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}

/* Memory loads through the second-operand address.  Note that each
   reuses o->in2 (the address) as the destination, so the address is
   no longer available afterwards.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* Memory loads through a PC-relative address (e.g. LRL and friends). */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* Immediates: raw (already sign-extended by the extractor), or
   explicitly truncated to 8/16/32 unsigned bits.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

/* Unsigned immediates pre-shifted into position; the shift count
   comes from the insn table's data field.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
4051
4052 /* ====================================================================== */
4053
4054 /* Find opc within the table of insns. This is formulated as a switch
4055 statement so that (1) we get compile-time notice of cut-paste errors
4056 for duplicated opcodes, and (2) the compiler generates the binary
4057 search tree, rather than us having to post-process the table. */
4058
/* C() defines an insn that does not use the extra DATA parameter. */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: an enum of insn indexes. */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: the table of DisasInsn descriptors, indexed by
   the enum above.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: switch cases mapping each opcode to its entry. */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4110
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
4114
4115 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4116 {
4117 uint32_t r, m;
4118
4119 if (f->size == 0) {
4120 return;
4121 }
4122
4123 /* Zero extract the field from the insn. */
4124 r = (insn << f->beg) >> (64 - f->size);
4125
4126 /* Sign-extend, or un-swap the field as necessary. */
4127 switch (f->type) {
4128 case 0: /* unsigned */
4129 break;
4130 case 1: /* signed */
4131 assert(f->size <= 32);
4132 m = 1u << (f->size - 1);
4133 r = (r ^ m) - m;
4134 break;
4135 case 2: /* dl+dh split, signed 20 bit. */
4136 r = ((int8_t)r << 12) | (r >> 8);
4137 break;
4138 default:
4139 abort();
4140 }
4141
4142 /* Validate that the "compressed" encoding we selected above is valid.
4143 I.e. we havn't make two different original fields overlap. */
4144 assert(((o->presentC >> f->indexC) & 1) == 0);
4145 o->presentC |= 1 << f->indexC;
4146 o->presentO |= 1 << f->indexO;
4147
4148 o->c[f->indexC] = r;
4149 }
4150
4151 /* Lookup the insn at the current PC, extracting the operands into O and
4152 returning the info struct for the insn. Returns NULL for invalid insn. */
4153
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first halfword carries the major opcode in its high byte,
       which alone determines the total instruction length (2/4/6).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the uint64_t, as extract_field and
       the op2 extraction below expect.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte. */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte. */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all. */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4239
/* Translate the single instruction at s->pc, advancing s->pc past it.
   Dispatches through the DisasInsn helper table (in1/in2/prep/op/
   wout/cout), falling back to the legacy interpreter for insns not
   yet converted.  Returns how the caller should continue.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter.  This includes ILLOPC. */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy is_jmp state onto the new ExitStatus. */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       TCG globals, which must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4324
/* Main translation loop: translate instructions into TB until an exit
   condition is hit.  With SEARCH_PC nonzero, also record per-op
   pc/cc_op/icount metadata so restore_state_to_opc can map a host PC
   back to guest state.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record the guest state for each generated op slot,
               zero-filling any slots skipped since last time.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this pc ends the TB before the insn. */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* These already emitted their own TB exit. */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the metadata for any trailing op slots. */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4463
/* Public entry point: translate a TB (no PC-search metadata). */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

/* Public entry point: translate a TB recording PC-search metadata,
   for use by restore_state_to_opc.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4473
4474 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4475 {
4476 int cc_op;
4477 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4478 cc_op = gen_opc_cc_op[pc_pos];
4479 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4480 env->cc_op = cc_op;
4481 }
4482 }