/*
 * Imported from qemu.git: target-s390x/translate.c
 * (snapshot at commit "target-s390: Convert SCKC, STCKC")
 */
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
/* Per-translation-block disassembly state. */
struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;       /* instruction currently being translated */
    DisasFields *fields;         /* decoded operand fields of insn */
    uint64_t pc, next_pc;        /* guest address of insn and of its successor */
    enum cc_op cc_op;            /* how the condition code is currently computed */
    bool singlestep_enabled;
    int is_jmp;                  /* how/why translation of this TB should stop */
};
60
/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;                  /* operands live in u.s64, not u.s32 */
    bool g1;                     /* u.*.a is a TCG global; do not free it */
    bool g2;                     /* u.*.b is a TCG global; do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
72
/* is_jmp value: translation stopped because an exception was generated. */
#define DISAS_EXCP 4

static void gen_op_calc_cc(DisasContext *s);

#ifdef DEBUG_INLINE_BRANCHES
/* Counters, indexed by cc_op, of how often a conditional branch could
   (hit) or could not (miss) be evaluated inline without a helper call. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
81
/* Trace the raw instruction bits when verbose disas logging is enabled. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
86
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
99 {
100 int i;
101
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
108 }
109
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
116 }
117 }
118
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
125 }
126 }
127
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
135 }
136 }
137 #endif
138
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
143 }
144 #endif
145
146 cpu_fprintf(f, "\n");
147 }
148
/* TCG globals mirroring the PSW fields of CPUS390XState. */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* TCG globals for the deferred condition-code computation state
   (see gen_op_calc_cc). */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Names and TCG handles for the 16 general and 16 FP registers. */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* cc_op value recorded per generated opcode, used when restoring CPU
   state after a fault in the middle of a TB. */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
/* One-time initialization: create the TCG globals above, backed by the
   corresponding CPUS390XState fields, and register the helper functions. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
202
/* Return a fresh temporary holding the 64-bit value of general reg REG. */
static inline TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temporary holding the 64-bit value of FP register REG. */
static inline TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, fregs[reg]);
    return r;
}

/* Return a fresh i32 temporary holding the short (high half) of FP
   register REG. */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    /* On 64-bit hosts an i32 temp aliases an i64 slot, so shift the
       high half down in place. */
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}
227
228 static inline TCGv_i64 load_freg32_i64(int reg)
229 {
230 TCGv_i64 r = tcg_temp_new_i64();
231 tcg_gen_shri_i64(r, fregs[reg], 32);
232 return r;
233 }
234
/* Return a fresh i32 temporary holding the low 32 bits of general
   register REG. */
static inline TCGv_i32 load_reg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(r, regs[reg]);
    return r;
}

/* Return a fresh i64 temporary holding the low 32 bits of general
   register REG, sign-extended to 64 bits. */
static inline TCGv_i64 load_reg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(r, regs[reg]);
    return r;
}
248
/* Write V into general register REG (full 64 bits). */
static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Write V into FP register REG (full 64 bits). */
static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
258
/* Write the i32 V into the low half of general register REG. */
static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    /* On 64-bit hosts the i32 temp aliases an i64 slot; deposit its
       low 32 bits into the register. */
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}

/* Write the low 32 bits of the i64 V into the low half of register REG. */
static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Write the low 32 bits of the i64 V into the HIGH half of register REG. */
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
280
/* Write the i32 V into the short (high half) of FP register REG. */
static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}

/* Write the low 32 bits of the i64 V into the short (high half) of FP
   register REG. */
static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
296
/* Fetch the low half of a 128-bit helper result from env->retxl. */
static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

/* Synchronize psw.addr with the translator's current pc. */
static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Before an operation that may fault (softmmu only), make sure
   psw.addr and the condition code are up to date so the fault handler
   sees a consistent state. */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}
315
/* Fetch a 2-byte instruction halfword at guest address PC. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch a 4-byte instruction word at guest address PC (zero-extended). */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch a 6-byte instruction at PC: first halfword in bits 47-32,
   remaining word in bits 31-0. */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
330
/* Map the PSW address-space-control bits captured in the TB flags to a
   QEMU MMU index: 0 = primary, 1 = secondary, 2 = home space. */
static inline int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Access-register mode is not implemented here. */
        tcg_abort();
        break;
    }
}
345
346 static void gen_exception(int excp)
347 {
348 TCGv_i32 tmp = tcg_const_i32(excp);
349 gen_helper_exception(cpu_env, tmp);
350 tcg_temp_free_i32(tmp);
351 }
352
/* Raise program exception CODE for the current instruction: record the
   interruption code and instruction length in env, advance the PSW past
   the instruction, flush the cc state, and end the TB. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Record the instruction length for the interruption parameters. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    gen_op_calc_cc(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);

    /* End TB here.  */
    s->is_jmp = DISAS_EXCP;
}
379
380 static inline void gen_illegal_opcode(DisasContext *s)
381 {
382 gen_program_exception(s, PGM_SPECIFICATION);
383 }
384
/* If the guest is in problem state (PSW P bit set in the TB flags),
   raise a privileged-operation exception; callers still emit the
   instruction body, which is then unreachable at runtime. */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
391
392 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
393 {
394 TCGv_i64 tmp;
395
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s->tb->flags & FLAG_MASK_64)) {
398 d2 &= 0x7fffffffUL;
399 }
400
401 if (x2) {
402 if (d2) {
403 tmp = tcg_const_i64(d2);
404 tcg_gen_add_i64(tmp, tmp, regs[x2]);
405 } else {
406 tmp = load_reg(x2);
407 }
408 if (b2) {
409 tcg_gen_add_i64(tmp, tmp, regs[b2]);
410 }
411 } else if (b2) {
412 if (d2) {
413 tmp = tcg_const_i64(d2);
414 tcg_gen_add_i64(tmp, tmp, regs[b2]);
415 } else {
416 tmp = load_reg(b2);
417 }
418 } else {
419 tmp = tcg_const_i64(d2);
420 }
421
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
424 tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
425 }
426
427 return tmp;
428 }
429
/* Record that the condition code is the constant VAL (0..3). */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}

/* Defer cc computation OP with a single 64-bit operand (in cc_dst). */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Defer cc computation OP with a single 32-bit operand. */
static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Defer cc computation OP with two 64-bit operands (cc_src, cc_dst). */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Defer cc computation OP with two 32-bit operands. */
static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Defer cc computation OP with three 64-bit operands
   (cc_src, cc_dst, cc_vr). */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
477
/* Convenience wrappers: defer the common cc computations. */

/* cc from "is the 32-bit value nonzero". */
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}

/* cc from "is the 64-bit value nonzero". */
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc from the class of a 32-bit float result. */
static inline void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc from the class of a 64-bit float result. */
static inline void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc from the class of a 128-bit float result (high/low halves). */
static inline void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
502
/* Defer a 32-bit comparison v1 <=> v2 under comparison kind COND. */
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}

/* Defer a 64-bit comparison v1 <=> v2 under comparison kind COND. */
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}

/* Signed 32-bit compare. */
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}

/* Unsigned 32-bit compare. */
static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}

/* Signed 32-bit compare against an immediate. */
static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
{
    /* XXX optimize for the constant? put it in s? */
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
    tcg_temp_free_i32(tmp);
}

/* Unsigned 32-bit compare against an immediate. */
static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
{
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
    tcg_temp_free_i32(tmp);
}
539
/* Signed 64-bit compare. */
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}

/* Unsigned 64-bit compare. */
static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}

/* Signed 64-bit compare against an immediate. */
static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_s64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* Unsigned 64-bit compare against an immediate. */
static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_u64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* cc from a signed 32-bit compare of VAL against zero. */
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}

/* cc from a signed 64-bit compare of VAL against zero. */
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}
573
/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}

/* Materialize the translator's cc_op into the cc_op global so helpers
   and the fault path can see it.  DYNAMIC/STATIC already live there. */
static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Alias of gen_op_set_cc_op, kept for naming parity with other targets. */
static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
594
/* calculates cc into cc_op */
/* Collapse the deferred cc computation: emit whatever code is needed to
   turn (cc_op kind, cc_src, cc_dst, cc_vr) into a final 0-3 value in the
   cc_op global, then mark the state as CC_OP_STATIC.  The helper always
   takes three operand slots; unused ones are passed a dummy zero. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
669
/* Extract the register fields of an RR-format instruction. */
static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}

/* Extract the fields of an RX-format instruction and return the
   computed effective address d2(x2,b2). */
static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;

    return get_address(s, *x2, *b2, *d2);
}

/* Extract the fields of an RS-format instruction. */
static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}

/* Extract the fields of an SI-format instruction and return the
   computed effective address d1(b1). */
static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}
714
/* Direct TB chaining to DEST is allowed only when DEST lies on one of
   the (up to two) guest pages this TB spans, and neither single-step
   nor icount I/O handling forbids it. */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}
723
/* End the TB with a jump to guest address PC, using direct TB chaining
   (slot TB_NUM) when permitted, a plain exit otherwise. */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    /* The cc state must be committed before leaving the TB. */
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}
738
/* Statistics hook: a conditional branch fell back to the helper. */
static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Statistics hook: a conditional branch was evaluated inline. */
static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
752
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.
   Indexed by the 4-bit branch mask (EQ=8, LT=4, GT=2, x=1). */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
779
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the deferred cc
   computation admits it, the branch condition is expressed directly on
   cc_src/cc_dst (inline); otherwise the cc is materialized first and
   the condition is expressed on the 0-3 cc value (dynamic). */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* All-ones / all-zeroes mask: unconditional branch or no-op. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            /* Branch if all selected bits are zero. */
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            /* Branch if any selected bit is set. */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (value & mask) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* Express each useful mask pattern as a test on the 0-3 cc. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
1002
1003 static void free_compare(DisasCompare *c)
1004 {
1005 if (!c->g1) {
1006 if (c->is_64) {
1007 tcg_temp_free_i64(c->u.s64.a);
1008 } else {
1009 tcg_temp_free_i32(c->u.s32.a);
1010 }
1011 }
1012 if (!c->g2) {
1013 if (c->is_64) {
1014 tcg_temp_free_i64(c->u.s64.b);
1015 } else {
1016 tcg_temp_free_i32(c->u.s32.b);
1017 }
1018 }
1019 }
1020
1021 static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
1022 uint32_t insn)
1023 {
1024 #ifndef CONFIG_USER_ONLY
1025 TCGv_i64 tmp, tmp2, tmp3;
1026 TCGv_i32 tmp32_1, tmp32_2;
1027 int r1, r2;
1028 int r3, d2, b2;
1029
1030 r1 = (insn >> 4) & 0xf;
1031 r2 = insn & 0xf;
1032
1033 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1034
1035 switch (op) {
1036 case 0x08: /* SPT D2(B2) [S] */
1037 /* Set CPU Timer */
1038 check_privileged(s);
1039 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1040 tmp = get_address(s, 0, b2, d2);
1041 potential_page_fault(s);
1042 gen_helper_spt(cpu_env, tmp);
1043 tcg_temp_free_i64(tmp);
1044 break;
1045 case 0x09: /* STPT D2(B2) [S] */
1046 /* Store CPU Timer */
1047 check_privileged(s);
1048 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1049 tmp = get_address(s, 0, b2, d2);
1050 potential_page_fault(s);
1051 gen_helper_stpt(cpu_env, tmp);
1052 tcg_temp_free_i64(tmp);
1053 break;
1054 case 0x0a: /* SPKA D2(B2) [S] */
1055 /* Set PSW Key from Address */
1056 check_privileged(s);
1057 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1058 tmp = get_address(s, 0, b2, d2);
1059 tmp2 = tcg_temp_new_i64();
1060 tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
1061 tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
1062 tcg_gen_or_i64(psw_mask, tmp2, tmp);
1063 tcg_temp_free_i64(tmp2);
1064 tcg_temp_free_i64(tmp);
1065 break;
1066 case 0x0d: /* PTLB [S] */
1067 /* Purge TLB */
1068 check_privileged(s);
1069 gen_helper_ptlb(cpu_env);
1070 break;
1071 case 0x10: /* SPX D2(B2) [S] */
1072 /* Set Prefix Register */
1073 check_privileged(s);
1074 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1075 tmp = get_address(s, 0, b2, d2);
1076 potential_page_fault(s);
1077 gen_helper_spx(cpu_env, tmp);
1078 tcg_temp_free_i64(tmp);
1079 break;
1080 case 0x11: /* STPX D2(B2) [S] */
1081 /* Store Prefix */
1082 check_privileged(s);
1083 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1084 tmp = get_address(s, 0, b2, d2);
1085 tmp2 = tcg_temp_new_i64();
1086 tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
1087 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1088 tcg_temp_free_i64(tmp);
1089 tcg_temp_free_i64(tmp2);
1090 break;
1091 case 0x12: /* STAP D2(B2) [S] */
1092 /* Store CPU Address */
1093 check_privileged(s);
1094 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1095 tmp = get_address(s, 0, b2, d2);
1096 tmp2 = tcg_temp_new_i64();
1097 tmp32_1 = tcg_temp_new_i32();
1098 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
1099 tcg_gen_extu_i32_i64(tmp2, tmp32_1);
1100 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1101 tcg_temp_free_i64(tmp);
1102 tcg_temp_free_i64(tmp2);
1103 tcg_temp_free_i32(tmp32_1);
1104 break;
1105 case 0x21: /* IPTE R1,R2 [RRE] */
1106 /* Invalidate PTE */
1107 check_privileged(s);
1108 r1 = (insn >> 4) & 0xf;
1109 r2 = insn & 0xf;
1110 tmp = load_reg(r1);
1111 tmp2 = load_reg(r2);
1112 gen_helper_ipte(cpu_env, tmp, tmp2);
1113 tcg_temp_free_i64(tmp);
1114 tcg_temp_free_i64(tmp2);
1115 break;
1116 case 0x29: /* ISKE R1,R2 [RRE] */
1117 /* Insert Storage Key Extended */
1118 check_privileged(s);
1119 r1 = (insn >> 4) & 0xf;
1120 r2 = insn & 0xf;
1121 tmp = load_reg(r2);
1122 tmp2 = tcg_temp_new_i64();
1123 gen_helper_iske(tmp2, cpu_env, tmp);
1124 store_reg(r1, tmp2);
1125 tcg_temp_free_i64(tmp);
1126 tcg_temp_free_i64(tmp2);
1127 break;
1128 case 0x2a: /* RRBE R1,R2 [RRE] */
1129 /* Set Storage Key Extended */
1130 check_privileged(s);
1131 r1 = (insn >> 4) & 0xf;
1132 r2 = insn & 0xf;
1133 tmp32_1 = load_reg32(r1);
1134 tmp = load_reg(r2);
1135 gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
1136 set_cc_static(s);
1137 tcg_temp_free_i32(tmp32_1);
1138 tcg_temp_free_i64(tmp);
1139 break;
1140 case 0x2b: /* SSKE R1,R2 [RRE] */
1141 /* Set Storage Key Extended */
1142 check_privileged(s);
1143 r1 = (insn >> 4) & 0xf;
1144 r2 = insn & 0xf;
1145 tmp32_1 = load_reg32(r1);
1146 tmp = load_reg(r2);
1147 gen_helper_sske(cpu_env, tmp32_1, tmp);
1148 tcg_temp_free_i32(tmp32_1);
1149 tcg_temp_free_i64(tmp);
1150 break;
1151 case 0x34: /* STCH ? */
1152 /* Store Subchannel */
1153 check_privileged(s);
1154 gen_op_movi_cc(s, 3);
1155 break;
1156 case 0x46: /* STURA R1,R2 [RRE] */
1157 /* Store Using Real Address */
1158 check_privileged(s);
1159 r1 = (insn >> 4) & 0xf;
1160 r2 = insn & 0xf;
1161 tmp32_1 = load_reg32(r1);
1162 tmp = load_reg(r2);
1163 potential_page_fault(s);
1164 gen_helper_stura(cpu_env, tmp, tmp32_1);
1165 tcg_temp_free_i32(tmp32_1);
1166 tcg_temp_free_i64(tmp);
1167 break;
1168 case 0x50: /* CSP R1,R2 [RRE] */
1169 /* Compare And Swap And Purge */
1170 check_privileged(s);
1171 r1 = (insn >> 4) & 0xf;
1172 r2 = insn & 0xf;
1173 tmp32_1 = tcg_const_i32(r1);
1174 tmp32_2 = tcg_const_i32(r2);
1175 gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
1176 set_cc_static(s);
1177 tcg_temp_free_i32(tmp32_1);
1178 tcg_temp_free_i32(tmp32_2);
1179 break;
1180 case 0x5f: /* CHSC ? */
1181 /* Channel Subsystem Call */
1182 check_privileged(s);
1183 gen_op_movi_cc(s, 3);
1184 break;
1185 case 0x78: /* STCKE D2(B2) [S] */
1186 /* Store Clock Extended */
1187 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1188 tmp = get_address(s, 0, b2, d2);
1189 potential_page_fault(s);
1190 gen_helper_stcke(cc_op, cpu_env, tmp);
1191 set_cc_static(s);
1192 tcg_temp_free_i64(tmp);
1193 break;
1194 case 0x79: /* SACF D2(B2) [S] */
1195 /* Set Address Space Control Fast */
1196 check_privileged(s);
1197 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1198 tmp = get_address(s, 0, b2, d2);
1199 potential_page_fault(s);
1200 gen_helper_sacf(cpu_env, tmp);
1201 tcg_temp_free_i64(tmp);
1202 /* addressing mode has changed, so end the block */
1203 s->pc = s->next_pc;
1204 update_psw_addr(s);
1205 s->is_jmp = DISAS_JUMP;
1206 break;
1207 case 0x7d: /* STSI D2,(B2) [S] */
1208 check_privileged(s);
1209 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1210 tmp = get_address(s, 0, b2, d2);
1211 tmp32_1 = load_reg32(0);
1212 tmp32_2 = load_reg32(1);
1213 potential_page_fault(s);
1214 gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
1215 set_cc_static(s);
1216 tcg_temp_free_i64(tmp);
1217 tcg_temp_free_i32(tmp32_1);
1218 tcg_temp_free_i32(tmp32_2);
1219 break;
1220 case 0xb1: /* STFL D2(B2) [S] */
1221 /* Store Facility List (CPU features) at 200 */
1222 check_privileged(s);
1223 tmp2 = tcg_const_i64(0xc0000000);
1224 tmp = tcg_const_i64(200);
1225 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1226 tcg_temp_free_i64(tmp2);
1227 tcg_temp_free_i64(tmp);
1228 break;
1229 case 0xb2: /* LPSWE D2(B2) [S] */
1230 /* Load PSW Extended */
1231 check_privileged(s);
1232 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1233 tmp = get_address(s, 0, b2, d2);
1234 tmp2 = tcg_temp_new_i64();
1235 tmp3 = tcg_temp_new_i64();
1236 tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
1237 tcg_gen_addi_i64(tmp, tmp, 8);
1238 tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
1239 gen_helper_load_psw(cpu_env, tmp2, tmp3);
1240 /* we need to keep cc_op intact */
1241 s->is_jmp = DISAS_JUMP;
1242 tcg_temp_free_i64(tmp);
1243 tcg_temp_free_i64(tmp2);
1244 tcg_temp_free_i64(tmp3);
1245 break;
1246 case 0x20: /* SERVC R1,R2 [RRE] */
1247 /* SCLP Service call (PV hypercall) */
1248 check_privileged(s);
1249 potential_page_fault(s);
1250 tmp32_1 = load_reg32(r2);
1251 tmp = load_reg(r1);
1252 gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
1253 set_cc_static(s);
1254 tcg_temp_free_i32(tmp32_1);
1255 tcg_temp_free_i64(tmp);
1256 break;
1257 default:
1258 #endif
1259 LOG_DISAS("illegal b2 operation 0x%x\n", op);
1260 gen_illegal_opcode(s);
1261 #ifndef CONFIG_USER_ONLY
1262 break;
1263 }
1264 #endif
1265 }
1266
1267 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
1268 {
1269 unsigned char opc;
1270 uint64_t insn;
1271 int op;
1272
1273 opc = cpu_ldub_code(env, s->pc);
1274 LOG_DISAS("opc 0x%x\n", opc);
1275
1276 switch (opc) {
1277 case 0xb2:
1278 insn = ld_code4(env, s->pc);
1279 op = (insn >> 16) & 0xff;
1280 disas_b2(env, s, op, insn);
1281 break;
1282 default:
1283 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
1284 gen_illegal_opcode(s);
1285 break;
1286 }
1287 }
1288
1289 /* ====================================================================== */
1290 /* Define the insn format enumeration. */
/* Reduce each format description to a bare enumerator name, so that
   expanding insn-format.def yields one FMT_* constant per format.  */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
1308
1309 /* Define a structure to hold the decoded fields. We'll store each inside
1310 an array indexed by an enum. In order to conserve memory, we'll arrange
1311 for fields that do not exist at the same time to overlap, thus the "C"
1312 for compact. For checking purposes there is an "O" for original index
1313 as well that will be applied to availability bitmaps. */
1314
/* "Original" field indices: one per distinct field name that can
   appear in an instruction format.  Each is used as a bit position
   in DisasFields.presentO (see have_field1).  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
1337
/* "Compact" field indices: fields that can never co-occur within a
   single format share one storage slot in DisasFields.c[].  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1368
struct DisasFields {
    unsigned op:8;          /* major opcode byte */
    unsigned op2:8;         /* extended opcode byte, if any */
    unsigned presentC:16;   /* bitmap of valid compact (C) slots */
    unsigned int presentO;  /* bitmap of decoded original (O) fields */
    int c[NUM_C_FIELD];     /* extracted field values, compact-indexed */
};

/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
1380
1381 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1382 {
1383 return (f->presentO >> c) & 1;
1384 }
1385
/* Fetch a decoded field value.  The caller supplies both the original
   index (to assert the field really exists for this format) and the
   compact slot where the value is stored.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1392
1393 /* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;    /* first bit of the field within the insn */
    unsigned int size:8;   /* width of the field in bits */
    unsigned int type:2;   /* extraction kind: 0 plain, 1 immediate (I),
                              2 long displacement (BDL/BXDL) — see the
                              R/I/BDL macros below */
    unsigned int indexC:6; /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8; /* original field name (FLD_O_*) */
} DisasField;

/* Per-format list of the fields to extract from the insn.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1405
/* Field extractors: each macro expands to one or more DisasField
   initializers giving the field's bit position, width, type, and the
   two indices under which the extracted value is stored.  */
#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

/* Re-expand insn-format.def, this time into the field-layout table,
   indexed by the DisasFormat enumeration built above.  */
#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1446
1447 /* Generally, we'll extract operands into this structures, operate upon
1448 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1449 of routines below for more details. */
typedef struct {
    /* g_* flags mark the corresponding temp as a global register
       variable that must not be clobbered or freed (see op_andi).  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2; /* operand values */
    TCGv_i64 addr1;               /* address of the first operand, if any */
} DisasOps;
1455
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB; fall through to the next instruction. */
    NO_EXIT,
    /* We have emitted one or more goto_tb. No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
1473
/* Architecture facility required for an insn to be valid; checked
   against the configured CPU model before translation.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1496
/* One entry of the new-style insn decode table.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode, including any extension byte */
    DisasFormat fmt:6;      /* format, selects field extraction */
    DisasFacility fac:6;    /* facility required for this insn */

    const char *name;

    /* Optional translation stages: load inputs, prepare/write outputs,
       compute the condition code.  help_op emits the operation itself. */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    /* Per-insn constant, available to the helpers as s->insn->data. */
    uint64_t data;
};
1513
1514 /* ====================================================================== */
1515 /* Miscelaneous helpers, used by several operations. */
1516
1517 static void help_l2_shift(DisasContext *s, DisasFields *f,
1518 DisasOps *o, int mask)
1519 {
1520 int b2 = get_field(f, b2);
1521 int d2 = get_field(f, d2);
1522
1523 if (b2 == 0) {
1524 o->in2 = tcg_const_i64(d2 & mask);
1525 } else {
1526 o->in2 = get_address(s, 0, b2, d2);
1527 tcg_gen_andi_i64(o->in2, o->in2, mask);
1528 }
1529 }
1530
/* Emit an unconditional branch to DEST, using a direct TB link when
   the target is reachable via goto_tb.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        /* Branching to the fall-through address: nothing to emit.  */
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        /* cc_op must be written back before leaving the TB.  */
        gen_update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1547
/* Emit a conditional branch described by C, either to the immediate
   displacement IMM (dest = pc + 2*imm) or to the address in CDEST.
   Depending on which of the two targets permit a direct TB link, this
   emits a goto_tb pair, a single goto_tb plus an indirect exit, or a
   movcond on the PC.  Frees the compare on all paths.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond. This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken: psw_addr already holds the target (indirect
               case); store the immediate target here otherwise.  */
            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb. Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so a single 64-bit
               movcond can select the new PC.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

egress:
    free_compare(c);
    return ret;
}
1672
1673 /* ====================================================================== */
1674 /* The operations. These perform the bulk of the work for any insn,
1675 usually after the operands have been loaded and output initialized. */
1676
/* 64-bit integer absolute value.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Float absolute value: clear the sign bit of a 32-bit value held in
   the low half of an i64.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Float absolute value, 64-bit: clear the sign bit.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* Float absolute value, 128-bit: clear the sign bit of the high
   doubleword, copy the low doubleword unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* Integer addition.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Add with carry: out = in1 + in2 + carry, where the carry is bit 1
   of the current condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Extract the carry (bit 1) from the condition code.  */
    tcg_gen_shri_i64(cc, cc, 1);

    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}

/* BFP add, 32-bit.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP add, 64-bit.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP add, 128-bit; the low doubleword of the result is retrieved
   via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1743
/* Bitwise AND.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* AND with an immediate that occupies a fixed sub-field of the
   register.  insn->data packs the bit offset of the field (low byte)
   and its width in bits (next byte).  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place below, so it must not be a global.  */
    assert(!o->g_in2);
    /* Position the immediate, then set every bit outside the field so
       the AND leaves the rest of in1 untouched.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1766
1767 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1768 {
1769 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1770 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1771 tcg_gen_mov_i64(psw_addr, o->in2);
1772 return EXIT_PC_UPDATED;
1773 } else {
1774 return NO_EXIT;
1775 }
1776 }
1777
/* Branch and save, immediate target: store the link information, then
   branch to pc + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1783
1784 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1785 {
1786 int m1 = get_field(s->fields, m1);
1787 bool is_imm = have_field(s->fields, i2);
1788 int imm = is_imm ? get_field(s->fields, i2) : 0;
1789 DisasCompare c;
1790
1791 disas_jcc(s, &c, m1);
1792 return help_branch(s, &c, is_imm, imm, o->in2);
1793 }
1794
/* Branch on count, 32-bit: decrement the low half of r1 and branch
   if the result is nonzero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits, then write back only the low 32.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch on count, 64-bit: decrement r1 and branch if nonzero.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;    /* comparing the global register itself */
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1837
/* BFP compare, 32-bit; the helper computes the condition code.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* BFP compare, 64-bit.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* BFP compare, 128-bit (operands as doubleword pairs).  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1858
/* Convert 32-bit BFP to 32-bit int, rounding mode in m3; cc is set
   from the source value.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert 64-bit BFP to 32-bit int.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert 128-bit BFP (in1:in2) to 32-bit int.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert 32-bit BFP to 64-bit int.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert 64-bit BFP to 64-bit int.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert 128-bit BFP to 64-bit int.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert 64-bit int to 32-bit BFP; no cc change.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit int to 64-bit BFP.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit int to 128-bit BFP; low half via return_low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1937
/* CKSM: the helper accumulates the checksum over the buffer described
   by the r2/r2+1 register pair and returns the number of bytes it
   consumed; the address/length pair is advanced here.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the address by the bytes processed, shrink the length.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1954
/* Compare Logical (storage-to-storage): lengths of 1, 2, 4 or 8 bytes
   are inlined as two loads plus an unsigned compare; anything else
   goes through the helper, which sets the cc itself.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    /* The L field encodes length minus one.  */
    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    /* Unsigned comparison of the two loaded values.  */
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1988
/* Compare Logical Long Extended; operands are register pairs named by
   r1 and r3, handled entirely in the helper.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare Logical under Mask: compare the bytes of r1 selected by the
   m3 mask against successive bytes at the second-operand address.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Compare Logical String, terminated by the character in r0; the
   helper returns the updated addresses via its result and low128.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2022
/* Compare and Swap, 32-bit; the helper performs the atomic update and
   sets the cc.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare and Swap, 64-bit.  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare Double and Swap: assemble the 64-bit swap value from the
   even/odd register pair r3/r3+1 and reuse the 64-bit CSG helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare Double and Swap Grande (128-bit), entirely in the helper.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
2063
/* Convert to Decimal: convert the low 32 bits of in1 to packed decimal
   and store the 8-byte result at the second-operand address.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2075
2076 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (hypervisor call): the low 12 bits of D2 select the
   function; r1/r2 carry arguments and r2 receives the result.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
2090 #endif
2091
/* Signed 64/32 divide; the helper returns one half of the
   remainder/quotient pair directly and the other via return_low128.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Unsigned 64/32 divide.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Signed 64/64 divide.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Unsigned 128/64 divide; the 128-bit dividend is in out:out2.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* BFP divide, 32-bit.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP divide, 64-bit.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP divide, 128-bit; low half of the result via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2138
/* Extract Access register r2 into the output register.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* Extract the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2151
/* EXECUTE: run the instruction at the second-operand address, modified
   by the low byte of in1; implemented entirely in the helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register. We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction. This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway. So
       perhaps this doesn't bear thinking about any further. */

    TCGv_i64 tmp;

    /* The helper may fault or branch, so synchronize the PC and cc.  */
    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2177
/* FLOGR (Find Leftmost One): r1 gets the count of leading zeros (64 if
   the input is zero), r1+1 gets the input with that bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64. It also lets cc_dst be a convenient
       temporary during our computation. */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
       value by 64, which is undefined. But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing. */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2197
2198 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2199 {
2200 int m3 = get_field(s->fields, m3);
2201 int pos, len, base = s->insn->data;
2202 TCGv_i64 tmp = tcg_temp_new_i64();
2203 uint64_t ccm;
2204
2205 switch (m3) {
2206 case 0xf:
2207 /* Effectively a 32-bit load. */
2208 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2209 len = 32;
2210 goto one_insert;
2211
2212 case 0xc:
2213 case 0x6:
2214 case 0x3:
2215 /* Effectively a 16-bit load. */
2216 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2217 len = 16;
2218 goto one_insert;
2219
2220 case 0x8:
2221 case 0x4:
2222 case 0x2:
2223 case 0x1:
2224 /* Effectively an 8-bit load. */
2225 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2226 len = 8;
2227 goto one_insert;
2228
2229 one_insert:
2230 pos = base + ctz32(m3) * 8;
2231 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2232 ccm = ((1ull << len) - 1) << pos;
2233 break;
2234
2235 default:
2236 /* This is going to be a sequence of loads and inserts. */
2237 pos = base + 32 - 8;
2238 ccm = 0;
2239 while (m3) {
2240 if (m3 & 0x8) {
2241 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2242 tcg_gen_addi_i64(o->in2, o->in2, 1);
2243 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2244 ccm |= 0xff << pos;
2245 }
2246 m3 = (m3 << 1) & 0xf;
2247 pos -= 8;
2248 }
2249 break;
2250 }
2251
2252 tcg_gen_movi_i64(tmp, ccm);
2253 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2254 tcg_temp_free_i64(tmp);
2255 return NO_EXIT;
2256 }
2257
/* Insert an immediate (in2) into a bit-field of in1; insn->data packs
   the field's bit offset (low byte) and width (next byte).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2265
/* Insert Program Mask: rebuild the destination byte (bits 24-31 of the
   low word) from the PSW program mask and the condition code.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    /* Clear the byte that will receive the mask and cc.  */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Shift the PSW program-mask field down into position.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Insert the condition code above it.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2284
/* LOAD LENGTHENED: convert the float32 second operand to float64. */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED: convert the float64 second operand to float32. */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED: convert a float128 (in1:in2 pair) to float64. */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED: convert a float128 (in1:in2 pair) to float32. */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD LENGTHENED: float64 -> float128; low half returned via out2. */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LENGTHENED: float32 -> float128; low half returned via out2. */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2322
/* LOAD LOGICAL THIRTY ONE BITS: keep only the low 31 bits of IN2. */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

/* Memory loads of various widths/signedness; IN2 is the address. */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2370
2371 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; load control registers r1..r3
   from successive words at the second-operand address. */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit): as op_lctl, with doubleword operands. */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS: privileged; translate the second-operand address
   and set the condition code from the translation result. */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PSW: privileged; fetch a short-format PSW (two words) from
   memory, widen the mask, and install it.  Ends the TB. */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2422 #endif
2423
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory. */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2434
2435 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2436 {
2437 int r1 = get_field(s->fields, r1);
2438 int r3 = get_field(s->fields, r3);
2439 TCGv_i64 t = tcg_temp_new_i64();
2440 TCGv_i64 t4 = tcg_const_i64(4);
2441
2442 while (1) {
2443 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2444 store_reg32_i64(r1, t);
2445 if (r1 == r3) {
2446 break;
2447 }
2448 tcg_gen_add_i64(o->in2, o->in2, t4);
2449 r1 = (r1 + 1) & 15;
2450 }
2451
2452 tcg_temp_free_i64(t);
2453 tcg_temp_free_i64(t4);
2454 return NO_EXIT;
2455 }
2456
2457 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2458 {
2459 int r1 = get_field(s->fields, r1);
2460 int r3 = get_field(s->fields, r3);
2461 TCGv_i64 t = tcg_temp_new_i64();
2462 TCGv_i64 t4 = tcg_const_i64(4);
2463
2464 while (1) {
2465 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2466 store_reg32h_i64(r1, t);
2467 if (r1 == r3) {
2468 break;
2469 }
2470 tcg_gen_add_i64(o->in2, o->in2, t4);
2471 r1 = (r1 + 1) & 15;
2472 }
2473
2474 tcg_temp_free_i64(t);
2475 tcg_temp_free_i64(t4);
2476 return NO_EXIT;
2477 }
2478
2479 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2480 {
2481 int r1 = get_field(s->fields, r1);
2482 int r3 = get_field(s->fields, r3);
2483 TCGv_i64 t8 = tcg_const_i64(8);
2484
2485 while (1) {
2486 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2487 if (r1 == r3) {
2488 break;
2489 }
2490 tcg_gen_add_i64(o->in2, o->in2, t8);
2491 r1 = (r1 + 1) & 15;
2492 }
2493
2494 tcg_temp_free_i64(t8);
2495 return NO_EXIT;
2496 }
2497
/* Generic move: hand ownership of the IN2 temp over to OUT so the
   epilogue stores it; IN2 is cleared so it is not freed twice. */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* Generic 128-bit move: transfer both input temps (and their "is a
   TCG global" flags) to the output pair. */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2518
/* MOVE (character): copy l1+1 bytes from the second operand to the
   first; all work is done in the helper. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: register pairs r1/r2 describe the operands; the helper
   updates them and produces the condition code. */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as MVCL, with a pad byte from the insn. */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2551
2552 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-address-space move; length comes
   from the register selected by the l1 field. */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: privileged; mirror image of MVCP. */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2572 #endif
2573
/* MOVE PAGE: helper moves one page; r0 carries the option bits.
   NOTE(review): the helper is expected to set env->cc_op itself,
   since set_cc_static only reloads it — confirm against helper.c. */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: r0 holds the terminator byte; the helper returns the
   updated first-operand address (out) and second-operand address
   (in the low 128 half, written back to in2). */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2590
/* MULTIPLY: 64-bit low-half product. */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (128-bit product): high half in out, low half via out2. */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (float32). */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (float32 operands, float64 result). */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (float64). */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (float128): out:out2 pair times in1:in2 pair. */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (float64 operands, float128 result). */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (float32): the 32-bit third operand must be
   extracted from its float register into a temp. */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (float64): the full freg can be passed directly. */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (float32). */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (float64). */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2665
/* LOAD NEGATIVE: out = -|in2|. */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD NEGATIVE (float32): force the sign bit of the 32-bit image. */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (float64): force the sign bit. */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (float128): sign lives in the high doubleword. */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* AND (character): memory-to-memory AND of l1+1 bytes; CC from helper. */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD COMPLEMENT: two's complement negation. */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (float32): flip the sign bit of the 32-bit image. */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (float64): flip the sign bit. */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (float128): sign lives in the high doubleword. */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* OR (character): memory-to-memory OR of l1+1 bytes; CC from helper. */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* OR: plain 64-bit bitwise or. */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2741
2742 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2743 {
2744 int shift = s->insn->data & 0xff;
2745 int size = s->insn->data >> 8;
2746 uint64_t mask = ((1ull << size) - 1) << shift;
2747
2748 assert(!o->g_in2);
2749 tcg_gen_shli_i64(o->in2, o->in2, shift);
2750 tcg_gen_or_i64(o->out, o->in1, o->in2);
2751
2752 /* Produce the CC from only the bits manipulated. */
2753 tcg_gen_andi_i64(cc_dst, o->out, mask);
2754 set_cc_nz_u64(s, cc_dst);
2755 return NO_EXIT;
2756 }
2757
/* Byte-reverse the low 16 bits of IN2. */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse the low 32 bits of IN2. */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse all 64 bits of IN2. */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2775
2776 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2777 {
2778 TCGv_i32 t1 = tcg_temp_new_i32();
2779 TCGv_i32 t2 = tcg_temp_new_i32();
2780 TCGv_i32 to = tcg_temp_new_i32();
2781 tcg_gen_trunc_i64_i32(t1, o->in1);
2782 tcg_gen_trunc_i64_i32(t2, o->in2);
2783 tcg_gen_rotl_i32(to, t1, t2);
2784 tcg_gen_extu_i32_i64(o->out, to);
2785 tcg_temp_free_i32(t1);
2786 tcg_temp_free_i32(t2);
2787 tcg_temp_free_i32(to);
2788 return NO_EXIT;
2789 }
2790
/* ROTATE LEFT SINGLE LOGICAL (64-bit). */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SET ACCESS: copy the low word of IN2 into access register r1. */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}

/* SUBTRACT (float32). */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (float64). */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (float128): out:out2 minus in1:in2. */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT (float32). */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (float64). */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (float128). */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2841
2842 #ifndef CONFIG_USER_ONLY
/* SIGNAL PROCESSOR: privileged inter-CPU signalling via helper. */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2852 #endif
2853
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit index
   (31 for the 32-bit form, 63 for the 64-bit form).  The CC must be
   computed from the original operands, before in1 is clobbered. */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit. Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
2867
/* SHIFT LEFT SINGLE LOGICAL. */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic). */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL. */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SET FPC: load the floating-point control register from IN2. */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
2891
2892 #ifndef CONFIG_USER_ONLY
/* SET SYSTEM MASK: privileged; replace the system-mask byte (the top
   byte of the 64-bit PSW mask) with the low byte of IN2. */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CLOCK: read the TOD clock via helper. */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* SET CLOCK COMPARATOR: privileged. */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged. */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
2921
/* STORE CONTROL (64-bit): privileged; store control registers r1..r3
   as doublewords at the second-operand address. */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit): privileged; word-sized variant. */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CPU ID: privileged; only the CPU number is provided here
   (zero-extended into the output) — the rest of the ID is zero. */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
2952
/* STORE THEN {AND,OR} SYSTEM MASK: privileged; store the current
   system-mask byte, then AND (STNSM, opcode 0xac) or OR (STOSM) the
   immediate into it. */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    /* The mask byte occupies PSW mask bits 56-63; i2 is uint64_t so
       the shift below cannot overflow. */
    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
2976 #endif
2977
/* Memory stores of various widths; IN1 is the value, IN2 the address. */

static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* STORE ACCESS MULTIPLE: store access registers r1..r3 via helper. */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3012
/* STORE CHARACTERS UNDER MASK: store the bytes of R1 selected by the
   m3 mask to successive byte locations.  insn->data is the base bit
   position of the source field (0 for the low word, 32 for STCMH). */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        /* Walk the mask from its most-significant bit; the address
           advances only for selected bytes. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3061
3062 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3063 {
3064 int r1 = get_field(s->fields, r1);
3065 int r3 = get_field(s->fields, r3);
3066 int size = s->insn->data;
3067 TCGv_i64 tsize = tcg_const_i64(size);
3068
3069 while (1) {
3070 if (size == 8) {
3071 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3072 } else {
3073 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3074 }
3075 if (r1 == r3) {
3076 break;
3077 }
3078 tcg_gen_add_i64(o->in2, o->in2, tsize);
3079 r1 = (r1 + 1) & 15;
3080 }
3081
3082 tcg_temp_free_i64(tsize);
3083 return NO_EXIT;
3084 }
3085
3086 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3087 {
3088 int r1 = get_field(s->fields, r1);
3089 int r3 = get_field(s->fields, r3);
3090 TCGv_i64 t = tcg_temp_new_i64();
3091 TCGv_i64 t4 = tcg_const_i64(4);
3092 TCGv_i64 t32 = tcg_const_i64(32);
3093
3094 while (1) {
3095 tcg_gen_shl_i64(t, regs[r1], t32);
3096 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3097 if (r1 == r3) {
3098 break;
3099 }
3100 tcg_gen_add_i64(o->in2, o->in2, t4);
3101 r1 = (r1 + 1) & 15;
3102 }
3103
3104 tcg_temp_free_i64(t);
3105 tcg_temp_free_i64(t4);
3106 tcg_temp_free_i64(t32);
3107 return NO_EXIT;
3108 }
3109
/* SEARCH STRING: r0 holds the character to find; the helper returns
   the updated addresses (first via in1, second via the low 128). */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* SUBTRACT. */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: out = in1 + ~in2 + carry, where the carry is
   bit 1 of the condition code. */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Extract the carry from CC bit 1. */
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}

/* SUPERVISOR CALL: record the SVC code and instruction length, then
   raise the exception.  Ends the TB. */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3161
/* TEST DATA CLASS (float32): CC from the class-mask test. */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (float64). */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (float128): value in out:out2. */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3182
3183 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: probe access to the first-operand address. */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3191 #endif
3192
/* TRANSLATE: replace l1+1 bytes at the first operand using the
   256-byte table at the second operand. */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK: convert packed decimal to zoned format via helper. */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* EXCLUSIVE OR (character): memory-to-memory XOR; CC from helper. */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* EXCLUSIVE OR: plain 64-bit xor. */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3227
3228 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3229 {
3230 int shift = s->insn->data & 0xff;
3231 int size = s->insn->data >> 8;
3232 uint64_t mask = ((1ull << size) - 1) << shift;
3233
3234 assert(!o->g_in2);
3235 tcg_gen_shli_i64(o->in2, o->in2, shift);
3236 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3237
3238 /* Produce the CC from only the bits manipulated. */
3239 tcg_gen_andi_i64(cc_dst, o->out, mask);
3240 set_cc_nz_u64(s, cc_dst);
3241 return NO_EXIT;
3242 }
3243
/* Produce a zero output (e.g. for LOAD ZERO). */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a 128-bit zero; out2 aliases out, so mark it "global" to
   prevent a double free in the epilogue. */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3257
3258 /* ====================================================================== */
3259 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3260 the original inputs), update the various cc data structures in order to
3261 be able to compute the new condition code. */
3262
/* CC for LOAD POSITIVE (32-bit). */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

/* CC for LOAD POSITIVE (64-bit). */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* CC for signed 32-bit addition. */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

/* CC for signed 64-bit addition. */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* CC for unsigned (logical) 32-bit addition. */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

/* CC for unsigned (logical) 64-bit addition. */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* CC for 32-bit add-with-carry. */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

/* CC for 64-bit add-with-carry. */
static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* CC for signed 32-bit comparison. */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

/* CC for signed 64-bit comparison. */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* CC for unsigned 32-bit comparison. */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

/* CC for unsigned 64-bit comparison. */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* CC from a float32 result. */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

/* CC from a float64 result. */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

/* CC from a float128 result (pair in out/out2). */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* CC for LOAD NEGATIVE (32-bit). */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

/* CC for LOAD NEGATIVE (64-bit). */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* CC for LOAD COMPLEMENT (32-bit). */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

/* CC for LOAD COMPLEMENT (64-bit). */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* CC = zero/non-zero of the low 32 bits of the result; the result is
   zero-extended first so CC_OP_NZ can be used. */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

/* CC = zero/non-zero of the 64-bit result. */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* CC = sign comparison of the 32-bit result against zero. */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

/* CC = sign comparison of the 64-bit result against zero. */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* CC for signed 32-bit subtraction. */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

/* CC for signed 64-bit subtraction. */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* CC for unsigned (logical) 32-bit subtraction. */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

/* CC for unsigned (logical) 64-bit subtraction. */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* CC for 32-bit subtract-with-borrow. */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

/* CC for 64-bit subtract-with-borrow. */
static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* CC for TEST UNDER MASK (32-bit). */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

/* CC for TEST UNDER MASK (64-bit). */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3418
3419 /* ====================================================================== */
3420 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3421 with the TCG register to which we will write. Used in combination with
3422 the "wout" generators, in some cases we need a new temporary, and in
3423 some cases we can write to a TCG global. */
3424
/* Output goes to a fresh temporary. */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

/* Output is a fresh 128-bit pair of temporaries. */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}

/* Output is written directly to general register r1 (a TCG global). */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}

/* Output is the even/odd general register pair r1/r1+1. */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}

/* Output is written directly to float register r1. */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}

/* Output is the float-register pair r1/r1+2 used for float128. */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3465
3466 /* ====================================================================== */
3467 /* The "Write OUTput" generators. These generally perform some non-trivial
3468 copy of data to TCG globals, or to main memory. The trivial cases are
3469 generally handled by having a "prep" generator install the TCG global
3470 as the destination of the operation. */
3471
/* Store the full 64-bit result into general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

/* Insert only the low 8 bits of the result into r1, preserving the
   remaining bits of the register.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

/* Insert only the low 16 bits of the result into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}

/* Store the low 32 bits of the result into r1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}

/* Store the two 32-bit results into the register pair r1 / r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

/* Split one 64-bit result across the pair: low half to r1+1, high
   half to r1.  NOTE(review): the shift clobbers o->out in place, so
   this assumes o->out is a scratch temporary (prep_new), not a TCG
   global -- confirm against the insn table pairings.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

/* Store a short (32-bit) FP result into freg r1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}

/* Store a 64-bit FP result into freg r1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
3521 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3522 {
3523 /* ??? Specification exception: r1 must be < 14. */
3524 int f1 = get_field(s->fields, r1);
3525 store_freg(f1, o->out);
3526 store_freg((f1 + 2) & 15, o->out2);
3527 }
3528
/* Store the 32-bit result into r1 only when r1 and r2 differ; when
   they are the same register the write is skipped entirely.  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

/* FP variant of the above: conditional store to freg r1.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}

/* Store the low 8 bits of the result to the address in o->addr1.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

/* Store the low 16 bits of the result to memory.  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

/* Store the low 32 bits of the result to memory.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

/* Store the full 64-bit result to memory.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}

/* Store 32 bits to the second-operand address held in o->in2
   (rather than o->addr1).  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3567
3568 /* ====================================================================== */
3569 /* The "INput 1" generators. These load the first operand to an insn. */
3570
/* First operand: a copy of general register r1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

/* First operand: register r1 used in place as a TCG global.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

/* First operand: low 32 bits of r1, sign-extended to 64 bits.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

/* First operand: low 32 bits of r1, zero-extended.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

/* First operand: high 32 bits of r1, shifted down into the low half.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}

/* First operand: the odd register of the r1 pair (r1+1).  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* First operand: low 32 bits of r1+1, sign-extended.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* First operand: low 32 bits of r1+1, zero-extended.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}
3623 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3624 {
3625 /* ??? Specification exception: r1 must be even. */
3626 int r1 = get_field(f, r1);
3627 o->in1 = tcg_temp_new_i64();
3628 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3629 }
3630
/* First operand: a copy of general register r2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* First operand: a copy of general register r3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

/* First operand: register r3 used in place as a TCG global.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

/* First operand: low 32 bits of r3, sign-extended.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

/* First operand: low 32 bits of r3, zero-extended.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

/* First operand: short (32-bit) FP value from freg r1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

/* First operand: freg r1 used in place as a TCG global.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
/* First operand: the 128-bit FP value in register pair r1 / r1+2.
   NOTE(review): unlike other in1 helpers this fills o->out/o->out2
   rather than o->in1/o->in2 -- presumably because in1/in2 are
   reserved for a second 128-bit operand (cf. in2_x2_o), leaving
   out/out2 as the only remaining slots.  Confirm against the f128
   operations that consume these fields before changing anything.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3678
/* Compute the first-operand effective address from b1/d1 (no index
   register in this format).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

/* Compute an effective address from x2/b2/d2 into addr1; x2 is
   optional and treated as 0 when the format lacks it.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* Load an 8-bit value (zero-extended) from the first-operand address.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

/* Load a 16-bit value, sign-extended.  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

/* Load a 16-bit value, zero-extended.  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

/* Load a 32-bit value, sign-extended.  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

/* Load a 32-bit value, zero-extended.  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

/* Load a full 64-bit value.  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3731
3732 /* ====================================================================== */
3733 /* The "INput 2" generators. These load the second operand to an insn. */
3734
/* Second operand: register r1 used in place as a TCG global.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}

/* Second operand: low 16 bits of r1, zero-extended.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}

/* Second operand: low 32 bits of r1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}

/* Second operand: a copy of general register r2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}

/* Second operand: register r2 used in place as a TCG global.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Second operand: r2, but only when r2 is nonzero; otherwise in2 is
   left unset (register 0 means "no operand" for these insns).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}

/* Second operand: low 8 bits of r2, sign-extended.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: low 8 bits of r2, zero-extended.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: low 16 bits of r2, sign-extended.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: low 16 bits of r2, zero-extended.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: a copy of general register r3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

/* Second operand: low 32 bits of r2, sign-extended.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: low 32 bits of r2, zero-extended.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: short (32-bit) FP value from freg r2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}

/* Second operand: freg r2 used in place as a TCG global.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Second operand: the 128-bit FP value in register pair r2 / r2+2,
   occupying both in1 and in2 as globals.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r2 must be < 14. */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[(r2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}
3832
/* Second operand: the contents of r2 treated as an address, run
   through get_address (which applies the addressing-mode rules).  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}

/* Second operand: effective address from x2/b2/d2; x2 treated as 0
   when the format lacks an index field.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* Second operand: PC-relative address, i2 counted in halfwords.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

/* Second operand: shift amount, masked to 5 bits (max 31).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

/* Second operand: shift amount, masked to 6 bits (max 63).  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}

/* Second operand loaded from memory at the a2 effective address;
   the address in in2 is overwritten by the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

/* 16-bit memory load, sign-extended.  */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

/* 16-bit memory load, zero-extended.  */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* 32-bit memory load, sign-extended.  */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* 32-bit memory load, zero-extended.  */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* 64-bit memory load.  */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* PC-relative memory loads (address built by in2_ri2).  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* PC-relative 32-bit load, sign-extended.  */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* PC-relative 32-bit load, zero-extended.  */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* PC-relative 64-bit load.  */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* Second operand: the immediate i2, as extracted (sign per format).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

/* Immediate i2 truncated/zero-extended to 8 bits.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

/* Immediate i2 truncated/zero-extended to 16 bits.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

/* Immediate i2 truncated/zero-extended to 32 bits.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

/* 16-bit immediate shifted left by the per-insn DATA amount.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

/* 32-bit immediate shifted left by the per-insn DATA amount.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
3950
3951 /* ====================================================================== */
3952
3953 /* Find opc within the table of insns. This is formulated as a switch
3954 statement so that (1) we get compile-time notice of cut-paste errors
3955 for duplicated opcodes, and (2) the compiler generates the binary
3956 search tree, rather than us having to post-process the table. */
3957
/* C() is simply D() with a zero DATA field.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: one enumerator per insn.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: one DisasInsn descriptor per insn, wiring up the
   in1/in2/prep/wout/cout/op helper functions by token pasting.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: one switch case per opcode, which the compiler
   turns into a binary search over the table.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a combined (major << 8 | secondary) opcode to its descriptor,
   or NULL when the opcode is not in the table.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4009
/* Extract a field from the insn. The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation. */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* Size zero marks an absent field; nothing to do.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;  /* standard xor/sub sign-extension trick */
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* Reassemble: dh (low 8 bits, signed) becomes the high 12 bits,
           dl (upper bits of r) becomes the low 12.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4049
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn. Returns NULL for invalid insn. */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes give the major opcode, which determines the
       instruction length (2, 4 or 6 bytes).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Read the whole insn and left-align it in the 64-bit word, the
       layout extract_field expects.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode. Which we can't do without locating the
       secondary opcode. Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* SS formats have no secondary opcode.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4138
/* Translate the single instruction at s->pc into TCG ops, advance
   s->pc past it, and return the exit disposition.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter. This includes ILLOPC. */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy is_jmp disposition onto ExitStatus.  */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction: each phase is optional.  Load the
       inputs, prepare the output location, perform the operation,
       write back the output, then update the condition code.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       TCG globals, which must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4223
/* Translate a whole TranslationBlock: loop translate_one until an exit
   condition, then emit the TB epilogue.  When SEARCH_PC is set, also
   record per-op PC/CC bookkeeping for restore_state_to_opc.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Translation must stop at the guest page boundary.  */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        /* Record PC/CC/icount info for each op when searching.  */
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* Stop before translating an insn that has a breakpoint set.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* Exit code already emitted by the insn itself.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Clear the tail of the instr_start map.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4362
/* Public entry point: translate a TB without PC-search bookkeeping.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

/* As above, but record per-op PC info for restore_state_to_opc.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4372
4373 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4374 {
4375 int cc_op;
4376 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4377 cc_op = gen_opc_cc_op[pc_pos];
4378 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4379 env->cc_op = cc_op;
4380 }
4381 }