]> git.proxmox.com Git - qemu.git/blob - target-s390x/translate.c
target-s390: Convert LOAD ZERO
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/* Per-translation-block state threaded through all of the gen_* helpers. */
struct DisasContext {
    struct TranslationBlock *tb;   /* TB currently being translated */
    const DisasInsn *insn;         /* descriptor of the current instruction */
    DisasFields *fields;           /* decoded operand fields of insn */
    uint64_t pc, next_pc;          /* guest address of insn / of its successor */
    enum cc_op cc_op;              /* lazy condition-code state (see gen_op_calc_cc) */
    bool singlestep_enabled;       /* disables TB chaining in use_goto_tb */
    int is_jmp;                    /* translation-loop exit reason (e.g. DISAS_EXCP) */
};
60
/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;   /* TCG comparison to apply to the operands below */
    bool is_64;       /* operands are in u.s64 rather than u.s32 */
    bool g1;          /* u.*.a is a TCG global: must not be freed */
    bool g2;          /* u.*.b is a TCG global: must not be freed */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

/* is_jmp value: translation ended because an exception was generated. */
#define DISAS_EXCP 4

static void gen_op_calc_cc(DisasContext *s);

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters: branches compiled inline vs. via the generic helper. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
81
/* Trace the raw instruction word (only when verbose disas logging is on). */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
86
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
/* Dump the guest CPU state (PSW, general/float/control registers) to F. */
void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;

    /* cc_op values above 3 are lazy CC_OP_* descriptors, not a final CC;
       print the descriptor's name instead of a number. */
    if (env->cc_op > 3) {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    /* General registers, four per line. */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    /* Floating point registers, four per line. */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

#ifndef CONFIG_USER_ONLY
    /* Control registers are only meaningful in system emulation. */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    /* Statistics gathered by account_{inline,noninline}_branch. */
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}
148
/* TCG globals mapping the guest PSW address and mask. */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* TCG globals backing the lazy condition-code state in CPUS390XState. */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Names ("r0".."r15", "f0".."f15") for the register globals below;
   the first 16 slots are GPRs, the last 16 FPRs. */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* NOTE(review): not referenced in this chunk; presumably a per-opcode
   record of cc_op paralleling gen_opc_pc for exception restore — confirm. */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
/* Allocate the TCG globals declared above (PSW, CC state, GPRs, FPRs)
   and register the helper functions.  Called once at startup. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* General registers r0..r15. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* Floating point registers f0..f15; their names occupy the second
       half of cpu_reg_names. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
202
/* Return a fresh temporary holding the full 64-bit value of GPR REG. */
static inline TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temporary holding the raw 64-bit image of FPR REG. */
static inline TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, fregs[reg]);
    return r;
}

/* Return a fresh i32 temporary with the upper (short-format) half of
   FPR REG. */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    /* On a 32-bit host an i64 global is an i32 pair; take the high word. */
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    /* On a 64-bit host, shift the high word down directly into R's
       underlying 64-bit storage. */
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}

/* As load_freg32, but produce the value zero-extended in an i64. */
static inline TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

/* Return a fresh i32 temporary with the low 32 bits of GPR REG. */
static inline TCGv_i32 load_reg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(r, regs[reg]);
    return r;
}

/* Return a fresh i64 temporary with the low 32 bits of GPR REG,
   sign-extended to 64 bits. */
static inline TCGv_i64 load_reg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(r, regs[reg]);
    return r;
}
248
/* Store the 64-bit value V into GPR REG. */
static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store the 64-bit value V into FPR REG. */
static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

/* Store the i32 value V into the low half of GPR REG. */
static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}

/* As store_reg32, but the source value is the low half of an i64. */
static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of GPR REG. */
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store the i32 value V into the upper (short-format) half of FPR REG. */
static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}

/* As store_freg32, but the source value is the low half of an i64. */
static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low 64 bits of a 128-bit helper result from env->retxl. */
static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
301
/* Write the current translation PC back to the PSW address global. */
static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Before an operation that may fault (softmmu only), bring psw.addr and
   the condition code up to date so the exception path sees consistent
   CPU state. */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}
315
/* Fetch 2 bytes of instruction text at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch 4 bytes of instruction text at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch a 6-byte instruction: 2-byte head in bits 47..32, 4-byte tail
   in the low 32 bits. */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
330
331 static inline int get_mem_index(DisasContext *s)
332 {
333 switch (s->tb->flags & FLAG_MASK_ASC) {
334 case PSW_ASC_PRIMARY >> 32:
335 return 0;
336 case PSW_ASC_SECONDARY >> 32:
337 return 1;
338 case PSW_ASC_HOME >> 32:
339 return 2;
340 default:
341 tcg_abort();
342 break;
343 }
344 }
345
/* Emit a call to the exception helper raising exception number EXCP. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
352
/* Raise program exception CODE: record the code and instruction length
   in the CPU state, advance the PSW past the instruction, materialize
   the CC, and trap.  Marks the TB as finished. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Record the instruction length for the exception handler. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    gen_op_calc_cc(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);

    /* End TB here. */
    s->is_jmp = DISAS_EXCP;
}
379
/* Reject an ill-formed opcode with a specification program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

/* Raise a privileged-operation exception if the PSW problem-state bit
   (cached in tb->flags) is set. */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
391
/* Compute the effective address base(b2) + index(x2) + displacement(d2)
   into a fresh temporary (caller frees), applying 31-bit wraparound when
   the PSW is not in 64-bit addressing mode.  Register number 0 means
   "no register" per the architecture, hence the x2/b2 zero tests. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
429
/* Set the CC to the compile-time constant VAL (0..3). */
static void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}

/* The gen_op_update{1,2,3}_cc_* helpers record a *lazy* condition code:
   the operands are captured in the cc_src/cc_dst/cc_vr globals and OP is
   remembered in s->cc_op so the real CC is computed only if a later
   instruction needs it (gen_op_calc_cc).  Unused operand globals are
   discarded to aid TCG liveness analysis. */

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
477
/* Record a lazy CC from a logical (zero / not-zero) 32-bit result. */
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}

/* Record a lazy CC from a logical (zero / not-zero) 64-bit result. */
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* Record a lazy 32-bit comparison of V1 against V2 under COND. */
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}

/* Record a lazy 64-bit comparison of V1 against V2 under COND. */
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}

/* Signed / unsigned 32-bit compares. */
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}

static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}

/* Compare against an immediate; the constant is materialized into a
   throwaway temporary. */
static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
{
    /* XXX optimize for the constant? put it in s? */
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
    tcg_temp_free_i32(tmp);
}

static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
{
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
    tcg_temp_free_i32(tmp);
}

/* Signed / unsigned 64-bit compares. */
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}

static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}

static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_s64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_u64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* Record a lazy CC from the sign of a 32-bit result (compare vs zero). */
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}

/* Record a lazy CC from the sign of a 64-bit result (compare vs zero). */
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}
558
/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    /* The CC is already materialized; the lazy operands are dead. */
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}

/* Spill the compile-time cc_op into the cc_op global — unless it is
   DYNAMIC/STATIC, in which case the global is already authoritative. */
static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
579
/* calculates cc into cc_op */
/* Materialize the lazy condition code: dispatch on s->cc_op to the
   calc_cc helper with however many of cc_src/cc_dst/cc_vr that
   operation consumes, then mark the CC as static. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
653
/* Decode an RR-format instruction: R1 and R2 in the low byte. */
static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}

/* Decode an RX-format instruction and return the computed effective
   address (caller frees the temporary). */
static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;

    return get_address(s, *x2, *b2, *d2);
}

/* Decode an RS-format instruction; the address is left to the caller. */
static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}

/* Decode an SI-format instruction and return the computed effective
   address (caller frees the temporary). */
static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}
698
699 static int use_goto_tb(DisasContext *s, uint64_t dest)
700 {
701 /* NOTE: we handle the case where the TB spans two pages here */
702 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
703 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
704 && !s->singlestep_enabled
705 && !(s->tb->cflags & CF_LAST_IO));
706 }
707
/* Emit a jump to guest address PC, chaining through goto_tb slot TB_NUM
   when use_goto_tb allows it; otherwise fall back to a plain TB exit. */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    /* Spill the compile-time CC state before leaving the TB. */
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}
722
/* Statistics: count a branch whose condition went through the generic
   calc_cc helper.  Compiled out unless DEBUG_INLINE_BRANCHES. */
static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Statistics: count a branch whose condition was compiled inline. */
static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
736
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.
   Indexed by the 4-bit instruction mask: bit 8 selects CC0 (EQ),
   bit 4 CC1 (LT), bit 2 CC2 (GT), bit 1 CC3. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
    TCG_COND_GT, TCG_COND_NEVER, /* | | GT | x */
    TCG_COND_LT, TCG_COND_NEVER, /* | LT | | x */
    TCG_COND_NE, TCG_COND_NEVER, /* | LT | GT | x */
    TCG_COND_EQ, TCG_COND_NEVER, /* EQ | | | x */
    TCG_COND_GE, TCG_COND_NEVER, /* EQ | | GT | x */
    TCG_COND_LE, TCG_COND_NEVER, /* EQ | LT | | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    /* | | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /* | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ | | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
763
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask of all ones / all zeroes: branch always / never, with dummy
       (global, non-freed) operands. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same mask table as above, with the unsigned TCG condition. */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: only "all selected bits zero" (CC0) and its
           complement can be compiled inline. */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        /* INSERT CHARACTERS UNDER MASK: CC0 = all inserted bits zero. */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC. We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
        /* cc_dst is a global; only the zero constant needs freeing. */
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (value & mask) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The CC value lives in the cc_op global; pick the cheapest
           test of it that implements MASK. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
971
972 static void free_compare(DisasCompare *c)
973 {
974 if (!c->g1) {
975 if (c->is_64) {
976 tcg_temp_free_i64(c->u.s64.a);
977 } else {
978 tcg_temp_free_i32(c->u.s32.a);
979 }
980 }
981 if (!c->g2) {
982 if (c->is_64) {
983 tcg_temp_free_i64(c->u.s64.b);
984 } else {
985 tcg_temp_free_i32(c->u.s32.b);
986 }
987 }
988 }
989
990 static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
991 uint32_t insn)
992 {
993 TCGv_i64 tmp, tmp2, tmp3;
994 TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
995 int r1, r2;
996 #ifndef CONFIG_USER_ONLY
997 int r3, d2, b2;
998 #endif
999
1000 r1 = (insn >> 4) & 0xf;
1001 r2 = insn & 0xf;
1002
1003 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1004
1005 switch (op) {
1006 case 0x22: /* IPM R1 [RRE] */
1007 tmp32_1 = tcg_const_i32(r1);
1008 gen_op_calc_cc(s);
1009 gen_helper_ipm(cpu_env, cc_op, tmp32_1);
1010 tcg_temp_free_i32(tmp32_1);
1011 break;
1012 case 0x41: /* CKSM R1,R2 [RRE] */
1013 tmp32_1 = tcg_const_i32(r1);
1014 tmp32_2 = tcg_const_i32(r2);
1015 potential_page_fault(s);
1016 gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
1017 tcg_temp_free_i32(tmp32_1);
1018 tcg_temp_free_i32(tmp32_2);
1019 gen_op_movi_cc(s, 0);
1020 break;
1021 case 0x4e: /* SAR R1,R2 [RRE] */
1022 tmp32_1 = load_reg32(r2);
1023 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
1024 tcg_temp_free_i32(tmp32_1);
1025 break;
1026 case 0x4f: /* EAR R1,R2 [RRE] */
1027 tmp32_1 = tcg_temp_new_i32();
1028 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
1029 store_reg32(r1, tmp32_1);
1030 tcg_temp_free_i32(tmp32_1);
1031 break;
1032 case 0x54: /* MVPG R1,R2 [RRE] */
1033 tmp = load_reg(0);
1034 tmp2 = load_reg(r1);
1035 tmp3 = load_reg(r2);
1036 potential_page_fault(s);
1037 gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
1038 tcg_temp_free_i64(tmp);
1039 tcg_temp_free_i64(tmp2);
1040 tcg_temp_free_i64(tmp3);
1041 /* XXX check CCO bit and set CC accordingly */
1042 gen_op_movi_cc(s, 0);
1043 break;
1044 case 0x55: /* MVST R1,R2 [RRE] */
1045 tmp32_1 = load_reg32(0);
1046 tmp32_2 = tcg_const_i32(r1);
1047 tmp32_3 = tcg_const_i32(r2);
1048 potential_page_fault(s);
1049 gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
1050 tcg_temp_free_i32(tmp32_1);
1051 tcg_temp_free_i32(tmp32_2);
1052 tcg_temp_free_i32(tmp32_3);
1053 gen_op_movi_cc(s, 1);
1054 break;
1055 case 0x5d: /* CLST R1,R2 [RRE] */
1056 tmp32_1 = load_reg32(0);
1057 tmp32_2 = tcg_const_i32(r1);
1058 tmp32_3 = tcg_const_i32(r2);
1059 potential_page_fault(s);
1060 gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1061 set_cc_static(s);
1062 tcg_temp_free_i32(tmp32_1);
1063 tcg_temp_free_i32(tmp32_2);
1064 tcg_temp_free_i32(tmp32_3);
1065 break;
1066 case 0x5e: /* SRST R1,R2 [RRE] */
1067 tmp32_1 = load_reg32(0);
1068 tmp32_2 = tcg_const_i32(r1);
1069 tmp32_3 = tcg_const_i32(r2);
1070 potential_page_fault(s);
1071 gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1072 set_cc_static(s);
1073 tcg_temp_free_i32(tmp32_1);
1074 tcg_temp_free_i32(tmp32_2);
1075 tcg_temp_free_i32(tmp32_3);
1076 break;
1077
1078 #ifndef CONFIG_USER_ONLY
1079 case 0x02: /* STIDP D2(B2) [S] */
1080 /* Store CPU ID */
1081 check_privileged(s);
1082 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1083 tmp = get_address(s, 0, b2, d2);
1084 potential_page_fault(s);
1085 gen_helper_stidp(cpu_env, tmp);
1086 tcg_temp_free_i64(tmp);
1087 break;
1088 case 0x04: /* SCK D2(B2) [S] */
1089 /* Set Clock */
1090 check_privileged(s);
1091 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1092 tmp = get_address(s, 0, b2, d2);
1093 potential_page_fault(s);
1094 gen_helper_sck(cc_op, tmp);
1095 set_cc_static(s);
1096 tcg_temp_free_i64(tmp);
1097 break;
1098 case 0x05: /* STCK D2(B2) [S] */
1099 /* Store Clock */
1100 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1101 tmp = get_address(s, 0, b2, d2);
1102 potential_page_fault(s);
1103 gen_helper_stck(cc_op, cpu_env, tmp);
1104 set_cc_static(s);
1105 tcg_temp_free_i64(tmp);
1106 break;
1107 case 0x06: /* SCKC D2(B2) [S] */
1108 /* Set Clock Comparator */
1109 check_privileged(s);
1110 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1111 tmp = get_address(s, 0, b2, d2);
1112 potential_page_fault(s);
1113 gen_helper_sckc(cpu_env, tmp);
1114 tcg_temp_free_i64(tmp);
1115 break;
1116 case 0x07: /* STCKC D2(B2) [S] */
1117 /* Store Clock Comparator */
1118 check_privileged(s);
1119 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1120 tmp = get_address(s, 0, b2, d2);
1121 potential_page_fault(s);
1122 gen_helper_stckc(cpu_env, tmp);
1123 tcg_temp_free_i64(tmp);
1124 break;
1125 case 0x08: /* SPT D2(B2) [S] */
1126 /* Set CPU Timer */
1127 check_privileged(s);
1128 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1129 tmp = get_address(s, 0, b2, d2);
1130 potential_page_fault(s);
1131 gen_helper_spt(cpu_env, tmp);
1132 tcg_temp_free_i64(tmp);
1133 break;
1134 case 0x09: /* STPT D2(B2) [S] */
1135 /* Store CPU Timer */
1136 check_privileged(s);
1137 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1138 tmp = get_address(s, 0, b2, d2);
1139 potential_page_fault(s);
1140 gen_helper_stpt(cpu_env, tmp);
1141 tcg_temp_free_i64(tmp);
1142 break;
1143 case 0x0a: /* SPKA D2(B2) [S] */
1144 /* Set PSW Key from Address */
1145 check_privileged(s);
1146 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1147 tmp = get_address(s, 0, b2, d2);
1148 tmp2 = tcg_temp_new_i64();
1149 tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
1150 tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
1151 tcg_gen_or_i64(psw_mask, tmp2, tmp);
1152 tcg_temp_free_i64(tmp2);
1153 tcg_temp_free_i64(tmp);
1154 break;
1155 case 0x0d: /* PTLB [S] */
1156 /* Purge TLB */
1157 check_privileged(s);
1158 gen_helper_ptlb(cpu_env);
1159 break;
1160 case 0x10: /* SPX D2(B2) [S] */
1161 /* Set Prefix Register */
1162 check_privileged(s);
1163 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1164 tmp = get_address(s, 0, b2, d2);
1165 potential_page_fault(s);
1166 gen_helper_spx(cpu_env, tmp);
1167 tcg_temp_free_i64(tmp);
1168 break;
1169 case 0x11: /* STPX D2(B2) [S] */
1170 /* Store Prefix */
1171 check_privileged(s);
1172 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1173 tmp = get_address(s, 0, b2, d2);
1174 tmp2 = tcg_temp_new_i64();
1175 tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
1176 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1177 tcg_temp_free_i64(tmp);
1178 tcg_temp_free_i64(tmp2);
1179 break;
1180 case 0x12: /* STAP D2(B2) [S] */
1181 /* Store CPU Address */
1182 check_privileged(s);
1183 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1184 tmp = get_address(s, 0, b2, d2);
1185 tmp2 = tcg_temp_new_i64();
1186 tmp32_1 = tcg_temp_new_i32();
1187 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
1188 tcg_gen_extu_i32_i64(tmp2, tmp32_1);
1189 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1190 tcg_temp_free_i64(tmp);
1191 tcg_temp_free_i64(tmp2);
1192 tcg_temp_free_i32(tmp32_1);
1193 break;
1194 case 0x21: /* IPTE R1,R2 [RRE] */
1195 /* Invalidate PTE */
1196 check_privileged(s);
1197 r1 = (insn >> 4) & 0xf;
1198 r2 = insn & 0xf;
1199 tmp = load_reg(r1);
1200 tmp2 = load_reg(r2);
1201 gen_helper_ipte(cpu_env, tmp, tmp2);
1202 tcg_temp_free_i64(tmp);
1203 tcg_temp_free_i64(tmp2);
1204 break;
1205 case 0x29: /* ISKE R1,R2 [RRE] */
1206 /* Insert Storage Key Extended */
1207 check_privileged(s);
1208 r1 = (insn >> 4) & 0xf;
1209 r2 = insn & 0xf;
1210 tmp = load_reg(r2);
1211 tmp2 = tcg_temp_new_i64();
1212 gen_helper_iske(tmp2, cpu_env, tmp);
1213 store_reg(r1, tmp2);
1214 tcg_temp_free_i64(tmp);
1215 tcg_temp_free_i64(tmp2);
1216 break;
1217 case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Reset Reference Bit Extended */
1219 check_privileged(s);
1220 r1 = (insn >> 4) & 0xf;
1221 r2 = insn & 0xf;
1222 tmp32_1 = load_reg32(r1);
1223 tmp = load_reg(r2);
1224 gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
1225 set_cc_static(s);
1226 tcg_temp_free_i32(tmp32_1);
1227 tcg_temp_free_i64(tmp);
1228 break;
1229 case 0x2b: /* SSKE R1,R2 [RRE] */
1230 /* Set Storage Key Extended */
1231 check_privileged(s);
1232 r1 = (insn >> 4) & 0xf;
1233 r2 = insn & 0xf;
1234 tmp32_1 = load_reg32(r1);
1235 tmp = load_reg(r2);
1236 gen_helper_sske(cpu_env, tmp32_1, tmp);
1237 tcg_temp_free_i32(tmp32_1);
1238 tcg_temp_free_i64(tmp);
1239 break;
1240 case 0x34: /* STCH ? */
1241 /* Store Subchannel */
1242 check_privileged(s);
1243 gen_op_movi_cc(s, 3);
1244 break;
1245 case 0x46: /* STURA R1,R2 [RRE] */
1246 /* Store Using Real Address */
1247 check_privileged(s);
1248 r1 = (insn >> 4) & 0xf;
1249 r2 = insn & 0xf;
1250 tmp32_1 = load_reg32(r1);
1251 tmp = load_reg(r2);
1252 potential_page_fault(s);
1253 gen_helper_stura(cpu_env, tmp, tmp32_1);
1254 tcg_temp_free_i32(tmp32_1);
1255 tcg_temp_free_i64(tmp);
1256 break;
1257 case 0x50: /* CSP R1,R2 [RRE] */
1258 /* Compare And Swap And Purge */
1259 check_privileged(s);
1260 r1 = (insn >> 4) & 0xf;
1261 r2 = insn & 0xf;
1262 tmp32_1 = tcg_const_i32(r1);
1263 tmp32_2 = tcg_const_i32(r2);
1264 gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
1265 set_cc_static(s);
1266 tcg_temp_free_i32(tmp32_1);
1267 tcg_temp_free_i32(tmp32_2);
1268 break;
1269 case 0x5f: /* CHSC ? */
1270 /* Channel Subsystem Call */
1271 check_privileged(s);
1272 gen_op_movi_cc(s, 3);
1273 break;
1274 case 0x78: /* STCKE D2(B2) [S] */
1275 /* Store Clock Extended */
1276 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1277 tmp = get_address(s, 0, b2, d2);
1278 potential_page_fault(s);
1279 gen_helper_stcke(cc_op, cpu_env, tmp);
1280 set_cc_static(s);
1281 tcg_temp_free_i64(tmp);
1282 break;
1283 case 0x79: /* SACF D2(B2) [S] */
1284 /* Set Address Space Control Fast */
1285 check_privileged(s);
1286 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1287 tmp = get_address(s, 0, b2, d2);
1288 potential_page_fault(s);
1289 gen_helper_sacf(cpu_env, tmp);
1290 tcg_temp_free_i64(tmp);
1291 /* addressing mode has changed, so end the block */
1292 s->pc = s->next_pc;
1293 update_psw_addr(s);
1294 s->is_jmp = DISAS_JUMP;
1295 break;
1296 case 0x7d: /* STSI D2,(B2) [S] */
1297 check_privileged(s);
1298 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1299 tmp = get_address(s, 0, b2, d2);
1300 tmp32_1 = load_reg32(0);
1301 tmp32_2 = load_reg32(1);
1302 potential_page_fault(s);
1303 gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
1304 set_cc_static(s);
1305 tcg_temp_free_i64(tmp);
1306 tcg_temp_free_i32(tmp32_1);
1307 tcg_temp_free_i32(tmp32_2);
1308 break;
1309 case 0x9d: /* LFPC D2(B2) [S] */
1310 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1311 tmp = get_address(s, 0, b2, d2);
1312 tmp2 = tcg_temp_new_i64();
1313 tmp32_1 = tcg_temp_new_i32();
1314 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
1315 tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
1316 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
1317 tcg_temp_free_i64(tmp);
1318 tcg_temp_free_i64(tmp2);
1319 tcg_temp_free_i32(tmp32_1);
1320 break;
1321 case 0xb1: /* STFL D2(B2) [S] */
1322 /* Store Facility List (CPU features) at 200 */
1323 check_privileged(s);
1324 tmp2 = tcg_const_i64(0xc0000000);
1325 tmp = tcg_const_i64(200);
1326 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1327 tcg_temp_free_i64(tmp2);
1328 tcg_temp_free_i64(tmp);
1329 break;
1330 case 0xb2: /* LPSWE D2(B2) [S] */
1331 /* Load PSW Extended */
1332 check_privileged(s);
1333 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1334 tmp = get_address(s, 0, b2, d2);
1335 tmp2 = tcg_temp_new_i64();
1336 tmp3 = tcg_temp_new_i64();
1337 tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
1338 tcg_gen_addi_i64(tmp, tmp, 8);
1339 tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
1340 gen_helper_load_psw(cpu_env, tmp2, tmp3);
1341 /* we need to keep cc_op intact */
1342 s->is_jmp = DISAS_JUMP;
1343 tcg_temp_free_i64(tmp);
1344 tcg_temp_free_i64(tmp2);
1345 tcg_temp_free_i64(tmp3);
1346 break;
1347 case 0x20: /* SERVC R1,R2 [RRE] */
1348 /* SCLP Service call (PV hypercall) */
1349 check_privileged(s);
1350 potential_page_fault(s);
1351 tmp32_1 = load_reg32(r2);
1352 tmp = load_reg(r1);
1353 gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
1354 set_cc_static(s);
1355 tcg_temp_free_i32(tmp32_1);
1356 tcg_temp_free_i64(tmp);
1357 break;
1358 #endif
1359 default:
1360 LOG_DISAS("illegal b2 operation 0x%x\n", op);
1361 gen_illegal_opcode(s);
1362 break;
1363 }
1364 }
1365
1366 static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
1367 int r1, int r2)
1368 {
1369 TCGv_i64 tmp;
1370 TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
1371 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
1372 #define FP_HELPER(i) \
1373 tmp32_1 = tcg_const_i32(r1); \
1374 tmp32_2 = tcg_const_i32(r2); \
1375 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
1376 tcg_temp_free_i32(tmp32_1); \
1377 tcg_temp_free_i32(tmp32_2);
1378
1379 #define FP_HELPER_CC(i) \
1380 tmp32_1 = tcg_const_i32(r1); \
1381 tmp32_2 = tcg_const_i32(r2); \
1382 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
1383 set_cc_static(s); \
1384 tcg_temp_free_i32(tmp32_1); \
1385 tcg_temp_free_i32(tmp32_2);
1386
1387 switch (op) {
1388 case 0x84: /* SFPC R1 [RRE] */
1389 tmp32_1 = load_reg32(r1);
1390 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
1391 tcg_temp_free_i32(tmp32_1);
1392 break;
1393 case 0x94: /* CEFBR R1,R2 [RRE] */
1394 case 0x95: /* CDFBR R1,R2 [RRE] */
1395 case 0x96: /* CXFBR R1,R2 [RRE] */
1396 tmp32_1 = tcg_const_i32(r1);
1397 tmp32_2 = load_reg32(r2);
1398 switch (op) {
1399 case 0x94:
1400 gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
1401 break;
1402 case 0x95:
1403 gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
1404 break;
1405 case 0x96:
1406 gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
1407 break;
1408 default:
1409 tcg_abort();
1410 }
1411 tcg_temp_free_i32(tmp32_1);
1412 tcg_temp_free_i32(tmp32_2);
1413 break;
1414 case 0x98: /* CFEBR R1,R2 [RRE] */
1415 case 0x99: /* CFDBR R1,R2 [RRE] */
1416 case 0x9a: /* CFXBR R1,R2 [RRE] */
1417 tmp32_1 = tcg_const_i32(r1);
1418 tmp32_2 = tcg_const_i32(r2);
1419 tmp32_3 = tcg_const_i32(m3);
1420 switch (op) {
1421 case 0x98:
1422 gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1423 break;
1424 case 0x99:
1425 gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1426 break;
1427 case 0x9a:
1428 gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1429 break;
1430 default:
1431 tcg_abort();
1432 }
1433 set_cc_static(s);
1434 tcg_temp_free_i32(tmp32_1);
1435 tcg_temp_free_i32(tmp32_2);
1436 tcg_temp_free_i32(tmp32_3);
1437 break;
1438 case 0xa4: /* CEGBR R1,R2 [RRE] */
1439 case 0xa5: /* CDGBR R1,R2 [RRE] */
1440 tmp32_1 = tcg_const_i32(r1);
1441 tmp = load_reg(r2);
1442 switch (op) {
1443 case 0xa4:
1444 gen_helper_cegbr(cpu_env, tmp32_1, tmp);
1445 break;
1446 case 0xa5:
1447 gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
1448 break;
1449 default:
1450 tcg_abort();
1451 }
1452 tcg_temp_free_i32(tmp32_1);
1453 tcg_temp_free_i64(tmp);
1454 break;
1455 case 0xa6: /* CXGBR R1,R2 [RRE] */
1456 tmp32_1 = tcg_const_i32(r1);
1457 tmp = load_reg(r2);
1458 gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
1459 tcg_temp_free_i32(tmp32_1);
1460 tcg_temp_free_i64(tmp);
1461 break;
1462 case 0xa8: /* CGEBR R1,R2 [RRE] */
1463 tmp32_1 = tcg_const_i32(r1);
1464 tmp32_2 = tcg_const_i32(r2);
1465 tmp32_3 = tcg_const_i32(m3);
1466 gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1467 set_cc_static(s);
1468 tcg_temp_free_i32(tmp32_1);
1469 tcg_temp_free_i32(tmp32_2);
1470 tcg_temp_free_i32(tmp32_3);
1471 break;
1472 case 0xa9: /* CGDBR R1,R2 [RRE] */
1473 tmp32_1 = tcg_const_i32(r1);
1474 tmp32_2 = tcg_const_i32(r2);
1475 tmp32_3 = tcg_const_i32(m3);
1476 gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1477 set_cc_static(s);
1478 tcg_temp_free_i32(tmp32_1);
1479 tcg_temp_free_i32(tmp32_2);
1480 tcg_temp_free_i32(tmp32_3);
1481 break;
1482 case 0xaa: /* CGXBR R1,R2 [RRE] */
1483 tmp32_1 = tcg_const_i32(r1);
1484 tmp32_2 = tcg_const_i32(r2);
1485 tmp32_3 = tcg_const_i32(m3);
1486 gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1487 set_cc_static(s);
1488 tcg_temp_free_i32(tmp32_1);
1489 tcg_temp_free_i32(tmp32_2);
1490 tcg_temp_free_i32(tmp32_3);
1491 break;
1492 default:
1493 LOG_DISAS("illegal b3 operation 0x%x\n", op);
1494 gen_illegal_opcode(s);
1495 break;
1496 }
1497
1498 #undef FP_HELPER_CC
1499 #undef FP_HELPER
1500 }
1501
1502 static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
1503 int r2)
1504 {
1505 TCGv_i64 tmp;
1506 TCGv_i32 tmp32_1;
1507
1508 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1509 switch (op) {
1510 case 0x83: /* FLOGR R1,R2 [RRE] */
1511 tmp = load_reg(r2);
1512 tmp32_1 = tcg_const_i32(r1);
1513 gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
1514 set_cc_static(s);
1515 tcg_temp_free_i64(tmp);
1516 tcg_temp_free_i32(tmp32_1);
1517 break;
1518 default:
1519 LOG_DISAS("illegal b9 operation 0x%x\n", op);
1520 gen_illegal_opcode(s);
1521 break;
1522 }
1523 }
1524
1525 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
1526 {
1527 unsigned char opc;
1528 uint64_t insn;
1529 int op, r1, r2, r3;
1530
1531 opc = cpu_ldub_code(env, s->pc);
1532 LOG_DISAS("opc 0x%x\n", opc);
1533
1534 switch (opc) {
1535 case 0xb2:
1536 insn = ld_code4(env, s->pc);
1537 op = (insn >> 16) & 0xff;
1538 disas_b2(env, s, op, insn);
1539 break;
1540 case 0xb3:
1541 insn = ld_code4(env, s->pc);
1542 op = (insn >> 16) & 0xff;
1543 r3 = (insn >> 12) & 0xf; /* aka m3 */
1544 r1 = (insn >> 4) & 0xf;
1545 r2 = insn & 0xf;
1546 disas_b3(env, s, op, r3, r1, r2);
1547 break;
1548 case 0xb9:
1549 insn = ld_code4(env, s->pc);
1550 r1 = (insn >> 4) & 0xf;
1551 r2 = insn & 0xf;
1552 op = (insn >> 16) & 0xff;
1553 disas_b9(env, s, op, r1, r2);
1554 break;
1555 default:
1556 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
1557 gen_illegal_opcode(s);
1558 break;
1559 }
1560 }
1561
/* ====================================================================== */
/* Define the insn format enumeration. */
/* Each Fn(NAME, ...) entry in insn-format.def expands here to FMT_NAME,
   producing one enumerator per instruction format; the field arguments
   are ignored at this point and reused below to build format_info[].  */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
1581
1582 /* Define a structure to hold the decoded fields. We'll store each inside
1583 an array indexed by an enum. In order to conserve memory, we'll arrange
1584 for fields that do not exist at the same time to overlap, thus the "C"
1585 for compact. For checking purposes there is an "O" for original index
1586 as well that will be applied to availability bitmaps. */
1587
/* The "original" field indices: one per distinct field name.  Each is
   one bit position in DisasFields.presentO (see have_field1).  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
1610
/* The "compact" field indices: storage slots in DisasFields.c[].
   Fields that never appear in the same format share a slot, which is
   why several names map to the same value.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1641
struct DisasFields {
    unsigned op:8;          /* primary opcode byte */
    unsigned op2:8;         /* secondary opcode byte, where the format has one */
    unsigned presentC:16;   /* bitmap of occupied compact (C) slots */
    unsigned int presentO;  /* bitmap of decoded original (O) fields */
    int c[NUM_C_FIELD];     /* extracted field values, indexed by C slot */
};
1649
/* This is the way fields are to be accessed out of DisasFields. */
/* F is a bare field name (r1, d2, ...); the macros paste on the O/C
   index prefixes so callers never touch the enums directly.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
1653
1654 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1655 {
1656 return (f->presentO >> c) & 1;
1657 }
1658
/* Fetch the value of field o from its compact slot c.  Asking for a
   field the current format does not have is a decoder bug, hence the
   assert rather than a runtime error.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1665
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;          /* first bit of the field within the insn */
    unsigned int size:8;         /* width of the field in bits */
    unsigned int type:2;         /* extraction kind: 0 for most fields, 1 for
                                    I() immediates, 2 for 20-bit long
                                    displacements (interpreted by the
                                    extractor, outside this view) */
    unsigned int indexC:6;       /* compact storage slot */
    enum DisasFieldIndexO indexO:8;  /* original field index */
} DisasField;
1674
/* Per-format list of fields; at most NUM_C_FIELD entries, unused
   entries zero-filled (size 0 terminates).  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1678
/* Field constructors: R=register, M=mask, BD=base+12-bit displacement,
   BXD=base+index+displacement, BDL/BXDL=20-bit long-displacement
   variants, I=immediate, L=length.  N is the field number, B the
   starting bit, S the size in bits.  */
#define R(N, B)       { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    { B, S, 0, FLD_C_l##N, FLD_O_l##N }

/* This time each Fn(NAME, fields...) in insn-format.def expands to a
   DisasFormatInfo initializer, indexed by the FMT_NAME enum above.  */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1719
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;  /* true when the corresponding TCGv
                                          aliases a global (e.g. a cpu reg)
                                          and must not be freed/clobbered */
    TCGv_i64 out, out2, in1, in2;      /* operand values */
    TCGv_i64 addr1;                    /* first-operand address, if any */
} DisasOps;
1728
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
1746
/* Architecture facility an instruction belongs to; used to gate
   availability per CPU model.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1769
/* One entry of the table-driven decoder: identity plus the pipeline of
   per-insn callbacks (load inputs, prepare outputs, do the work, write
   back, compute cc).  Any callback may be NULL, meaning "nothing".  */
struct DisasInsn {
    unsigned opc:16;        /* opcode (primary, possibly with extension) */
    DisasFormat fmt:6;      /* instruction format, FMT_* */
    DisasFacility fac:6;    /* required facility, FAC_* */

    const char *name;       /* mnemonic, for logging */

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* per-insn constant, interpreted by the callbacks */
};
1786
1787 /* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */
1789
1790 static void help_l2_shift(DisasContext *s, DisasFields *f,
1791 DisasOps *o, int mask)
1792 {
1793 int b2 = get_field(f, b2);
1794 int d2 = get_field(f, d2);
1795
1796 if (b2 == 0) {
1797 o->in2 = tcg_const_i64(d2 & mask);
1798 } else {
1799 o->in2 = get_address(s, 0, b2, d2);
1800 tcg_gen_andi_i64(o->in2, o->in2, mask);
1801 }
1802 }
1803
1804 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1805 {
1806 if (dest == s->next_pc) {
1807 return NO_EXIT;
1808 }
1809 if (use_goto_tb(s, dest)) {
1810 gen_update_cc_op(s);
1811 tcg_gen_goto_tb(0);
1812 tcg_gen_movi_i64(psw_addr, dest);
1813 tcg_gen_exit_tb((tcg_target_long)s->tb);
1814 return EXIT_GOTO_TB;
1815 } else {
1816 tcg_gen_movi_i64(psw_addr, dest);
1817 return EXIT_PC_UPDATED;
1818 }
1819 }
1820
/* Emit a (possibly conditional) branch.  c describes the condition and
   is consumed (freed) on all paths.  If is_imm, the target is relative:
   s->pc + 2*imm; otherwise it is the run-time value in cdest.  Picks
   the cheapest exit shape available: direct goto_tb chaining for both
   edges, one edge, or a movcond-selected PC when chaining is off.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        /* Condition never true: no code at all.  */
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken: psw_addr already holds the destination in
               the register case; set it here for the immediate case.  */
            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* 32-bit compare: widen the comparison result to 64 bits so
               a single 64-bit movcond can select the new PC.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1945
1946 /* ====================================================================== */
1947 /* The operations. These perform the bulk of the work for any insn,
1948 usually after the operands have been loaded and output initialized. */
1949
/* Integer absolute value of in2.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}
1955
/* Float32 absolute value: clear the sign bit (bit 31).  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
1961
/* Float64 absolute value: clear the sign bit (bit 63).  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
1967
/* Float128 absolute value: clear the sign bit in the high half (in1),
   pass the low half (in2) through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
1974
/* 64-bit integer addition; cc computed separately via help_cout.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1980
1981 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1982 {
1983 TCGv_i64 cc;
1984
1985 tcg_gen_add_i64(o->out, o->in1, o->in2);
1986
1987 /* XXX possible optimization point */
1988 gen_op_calc_cc(s);
1989 cc = tcg_temp_new_i64();
1990 tcg_gen_extu_i32_i64(cc, cc_op);
1991 tcg_gen_shri_i64(cc, cc, 1);
1992
1993 tcg_gen_add_i64(o->out, o->out, cc);
1994 tcg_temp_free_i64(cc);
1995 return NO_EXIT;
1996 }
1997
/* ADD (short BFP): helper handles rounding and FP exceptions.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2003
/* ADD (long BFP): helper handles rounding and FP exceptions.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2009
/* ADD (extended BFP): 128-bit operands passed as two 64-bit halves;
   the low half of the result comes back via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2016
/* Bitwise AND; cc computed separately via help_cout.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2022
2023 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
2024 {
2025 int shift = s->insn->data & 0xff;
2026 int size = s->insn->data >> 8;
2027 uint64_t mask = ((1ull << size) - 1) << shift;
2028
2029 assert(!o->g_in2);
2030 tcg_gen_shli_i64(o->in2, o->in2, shift);
2031 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2032 tcg_gen_and_i64(o->out, o->in1, o->in2);
2033
2034 /* Produce the CC from only the bits manipulated. */
2035 tcg_gen_andi_i64(cc_dst, o->out, mask);
2036 set_cc_nz_u64(s, cc_dst);
2037 return NO_EXIT;
2038 }
2039
2040 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
2041 {
2042 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
2043 if (!TCGV_IS_UNUSED_I64(o->in2)) {
2044 tcg_gen_mov_i64(psw_addr, o->in2);
2045 return EXIT_PC_UPDATED;
2046 } else {
2047 return NO_EXIT;
2048 }
2049 }
2050
/* BRANCH AND SAVE with an immediate relative target: store the link
   information in out and jump to pc + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
2056
2057 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
2058 {
2059 int m1 = get_field(s->fields, m1);
2060 bool is_imm = have_field(s->fields, i2);
2061 int imm = is_imm ? get_field(s->fields, i2) : 0;
2062 DisasCompare c;
2063
2064 disas_jcc(s, &c, m1);
2065 return help_branch(s, &c, is_imm, imm, o->in2);
2066 }
2067
2068 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
2069 {
2070 int r1 = get_field(s->fields, r1);
2071 bool is_imm = have_field(s->fields, i2);
2072 int imm = is_imm ? get_field(s->fields, i2) : 0;
2073 DisasCompare c;
2074 TCGv_i64 t;
2075
2076 c.cond = TCG_COND_NE;
2077 c.is_64 = false;
2078 c.g1 = false;
2079 c.g2 = false;
2080
2081 t = tcg_temp_new_i64();
2082 tcg_gen_subi_i64(t, regs[r1], 1);
2083 store_reg32_i64(r1, t);
2084 c.u.s32.a = tcg_temp_new_i32();
2085 c.u.s32.b = tcg_const_i32(0);
2086 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
2087 tcg_temp_free_i64(t);
2088
2089 return help_branch(s, &c, is_imm, imm, o->in2);
2090 }
2091
2092 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
2093 {
2094 int r1 = get_field(s->fields, r1);
2095 bool is_imm = have_field(s->fields, i2);
2096 int imm = is_imm ? get_field(s->fields, i2) : 0;
2097 DisasCompare c;
2098
2099 c.cond = TCG_COND_NE;
2100 c.is_64 = true;
2101 c.g1 = true;
2102 c.g2 = false;
2103
2104 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
2105 c.u.s64.a = regs[r1];
2106 c.u.s64.b = tcg_const_i64(0);
2107
2108 return help_branch(s, &c, is_imm, imm, o->in2);
2109 }
2110
/* COMPARE (short BFP): helper sets the cc.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2117
/* COMPARE (long BFP): helper sets the cc.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2124
/* COMPARE (extended BFP): 128-bit operands as two 64-bit halves;
   helper sets the cc.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2131
2132 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
2133 {
2134 int l = get_field(s->fields, l1);
2135 TCGv_i32 vl;
2136
2137 switch (l + 1) {
2138 case 1:
2139 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2140 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2141 break;
2142 case 2:
2143 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2144 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2145 break;
2146 case 4:
2147 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2148 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2149 break;
2150 case 8:
2151 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2152 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2153 break;
2154 default:
2155 potential_page_fault(s);
2156 vl = tcg_const_i32(l);
2157 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2158 tcg_temp_free_i32(vl);
2159 set_cc_static(s);
2160 return NO_EXIT;
2161 }
2162 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2163 return NO_EXIT;
2164 }
2165
/* COMPARE LOGICAL LONG EXTENDED: fully in the helper, which sets cc.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2177
/* COMPARE LOGICAL CHARACTERS UNDER MASK: compare the bytes of the low
   32 bits of in1 selected by mask m3 against memory; helper sets cc.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2190
/* COMPARE AND SWAP (32-bit): helper performs the compare-and-swap of
   regs[r3] into the address in in2 and sets the cc.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
2199
/* COMPARE AND SWAP (64-bit): as op_cs, with 64-bit operands.  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
2208
2209 static ExitStatus op_cds(DisasContext *s, DisasOps *o)
2210 {
2211 int r3 = get_field(s->fields, r3);
2212 TCGv_i64 in3 = tcg_temp_new_i64();
2213 tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
2214 potential_page_fault(s);
2215 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
2216 tcg_temp_free_i64(in3);
2217 set_cc_static(s);
2218 return NO_EXIT;
2219 }
2220
/* COMPARE DOUBLE AND SWAP (64-bit pair, 128-bit total): done in the
   helper, which sets the cc.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
2231
2232 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2233 {
2234 TCGv_i64 t1 = tcg_temp_new_i64();
2235 TCGv_i32 t2 = tcg_temp_new_i32();
2236 tcg_gen_trunc_i64_i32(t2, o->in1);
2237 gen_helper_cvd(t1, t2);
2238 tcg_temp_free_i32(t2);
2239 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2240 tcg_temp_free_i64(t1);
2241 return NO_EXIT;
2242 }
2243
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypervisor call; the function code comes from
   the low 12 bits of d2, parameters in r1/r2 per convention.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
#endif
2259
/* Signed 32-bit divide: helper returns quotient/remainder as a pair,
   the second half retrieved via return_low128.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2266
/* Unsigned 32-bit divide; see op_divs32 for the result convention.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2273
/* Signed 64-bit divide; see op_divs32 for the result convention.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2280
/* Unsigned 128/64 divide: the 128-bit dividend is passed as the
   out/out2 pair; results come back as in the other divides.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2287
/* DIVIDE (short BFP).  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2293
2294 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2295 {
2296 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2297 return NO_EXIT;
2298 }
2299
/* DIVIDE (extended BFP).  128-bit dividend in out:out2, 128-bit divisor
   in in1:in2; the high half of the quotient is returned directly and
   the low half is fetched with return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2306
/* EXTRACT FPC: read the floating-point control register from env,
   zero-extended to 64 bits.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2312
/* EXECUTE: run the single target instruction at the address in in2,
   possibly modified by register in1, entirely inside a helper.  The
   PSW address and CC must be synced first because the helper executes
   a full instruction on its own.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    /* Pass the fall-through address so the helper can compute ilen.  */
    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2338
2339 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2340 {
2341 int m3 = get_field(s->fields, m3);
2342 int pos, len, base = s->insn->data;
2343 TCGv_i64 tmp = tcg_temp_new_i64();
2344 uint64_t ccm;
2345
2346 switch (m3) {
2347 case 0xf:
2348 /* Effectively a 32-bit load. */
2349 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2350 len = 32;
2351 goto one_insert;
2352
2353 case 0xc:
2354 case 0x6:
2355 case 0x3:
2356 /* Effectively a 16-bit load. */
2357 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2358 len = 16;
2359 goto one_insert;
2360
2361 case 0x8:
2362 case 0x4:
2363 case 0x2:
2364 case 0x1:
2365 /* Effectively an 8-bit load. */
2366 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2367 len = 8;
2368 goto one_insert;
2369
2370 one_insert:
2371 pos = base + ctz32(m3) * 8;
2372 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2373 ccm = ((1ull << len) - 1) << pos;
2374 break;
2375
2376 default:
2377 /* This is going to be a sequence of loads and inserts. */
2378 pos = base + 32 - 8;
2379 ccm = 0;
2380 while (m3) {
2381 if (m3 & 0x8) {
2382 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2383 tcg_gen_addi_i64(o->in2, o->in2, 1);
2384 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2385 ccm |= 0xff << pos;
2386 }
2387 m3 = (m3 << 1) & 0xf;
2388 pos -= 8;
2389 }
2390 break;
2391 }
2392
2393 tcg_gen_movi_i64(tmp, ccm);
2394 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2395 tcg_temp_free_i64(tmp);
2396 return NO_EXIT;
2397 }
2398
/* Insert immediate: deposit in2 into in1 at a fixed bit position.
   insn->data encodes the field: low byte = shift, high bits = size.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2406
2407 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2408 {
2409 gen_helper_ldeb(o->out, cpu_env, o->in2);
2410 return NO_EXIT;
2411 }
2412
2413 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2414 {
2415 gen_helper_ledb(o->out, cpu_env, o->in2);
2416 return NO_EXIT;
2417 }
2418
2419 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2420 {
2421 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2422 return NO_EXIT;
2423 }
2424
2425 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2426 {
2427 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2428 return NO_EXIT;
2429 }
2430
2431 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2432 {
2433 gen_helper_lxdb(o->out, cpu_env, o->in2);
2434 return_low128(o->out2);
2435 return NO_EXIT;
2436 }
2437
2438 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2439 {
2440 gen_helper_lxeb(o->out, cpu_env, o->in2);
2441 return_low128(o->out2);
2442 return NO_EXIT;
2443 }
2444
/* LOAD LOGICAL THIRTY ONE BITS: keep bits 33-63, clearing the rest.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2450
2451 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2452 {
2453 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2454 return NO_EXIT;
2455 }
2456
2457 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2458 {
2459 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2460 return NO_EXIT;
2461 }
2462
2463 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2464 {
2465 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2466 return NO_EXIT;
2467 }
2468
2469 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2470 {
2471 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2472 return NO_EXIT;
2473 }
2474
2475 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2476 {
2477 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2478 return NO_EXIT;
2479 }
2480
2481 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2482 {
2483 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2484 return NO_EXIT;
2485 }
2486
2487 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2488 {
2489 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2490 return NO_EXIT;
2491 }
2492
2493 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 from the
   address in in2.  Privileged.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2505
/* LOAD CONTROL (64-bit): as op_lctl but full 64-bit control regs.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS: translate the virtual address in in2; result and
   CC come from the helper.  Privileged.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2525
/* LOAD PSW: load an 8-byte ESA-format PSW (two 32-bit words) from the
   address in in2 and install it.  Ends the TB since the PSW (and thus
   control flow) changes.  Privileged.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2544 #endif
2545
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from the address
   in in2 via helper.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2556
/* LOAD MULTIPLE (32-bit): load successive words from the address in
   in2 into the low halves of registers r1..r3, with register-number
   wraparound (r1 > r3 wraps through r15 to r0).  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        /* Advance the address by one word for the next register.  */
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2578
/* LOAD MULTIPLE HIGH: as op_lm32, but each loaded word goes into the
   high half of the destination register.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2600
/* LOAD MULTIPLE (64-bit): load successive doublewords from the address
   in in2 directly into registers r1..r3, with wraparound.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2619
/* Generic register move: transfer ownership of in2 to out rather than
   copying it.  in2 is cleared so the generic cleanup code does not
   free the value twice; g_out inherits whether it is a TCG global.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2628
/* 128-bit move: transfer ownership of the in1:in2 pair to out:out2,
   clearing the inputs so they are not freed twice (cf. op_mov2).  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2640
2641 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2642 {
2643 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2644 potential_page_fault(s);
2645 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2646 tcg_temp_free_i32(l);
2647 return NO_EXIT;
2648 }
2649
/* MOVE LONG: interruptible memory copy between the register pairs
   named by r1 and r2; CC comes from the helper.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
2661
/* MOVE LONG EXTENDED: as MVCL but with a pad byte/address in in2 and
   the r3 register pair; CC from the helper.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2673
2674 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: cross-address-space copy; the length register is
   taken from the L1 field.  Privileged.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2684
/* MOVE TO SECONDARY: counterpart of op_mvcp for the secondary address
   space.  Privileged.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2694 #endif
2695
2696 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2697 {
2698 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2699 return NO_EXIT;
2700 }
2701
/* 64x64->128 multiply: high half returned directly, low half
   recovered into out2 via return_low128.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2708
2709 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2710 {
2711 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2712 return NO_EXIT;
2713 }
2714
2715 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2716 {
2717 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2718 return NO_EXIT;
2719 }
2720
2721 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2722 {
2723 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2724 return NO_EXIT;
2725 }
2726
2727 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2728 {
2729 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2730 return_low128(o->out2);
2731 return NO_EXIT;
2732 }
2733
2734 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2735 {
2736 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2737 return_low128(o->out2);
2738 return NO_EXIT;
2739 }
2740
2741 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2742 {
2743 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2744 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2745 tcg_temp_free_i64(r3);
2746 return NO_EXIT;
2747 }
2748
2749 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2750 {
2751 int r3 = get_field(s->fields, r3);
2752 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2753 return NO_EXIT;
2754 }
2755
2756 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2757 {
2758 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2759 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2760 tcg_temp_free_i64(r3);
2761 return NO_EXIT;
2762 }
2763
2764 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2765 {
2766 int r3 = get_field(s->fields, r3);
2767 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2768 return NO_EXIT;
2769 }
2770
2771 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2772 {
2773 gen_helper_nabs_i64(o->out, o->in2);
2774 return NO_EXIT;
2775 }
2776
/* LOAD NEGATIVE (short BFP): force the sign bit of the 32-bit value.
   NOTE(review): assumes the short-FP value sits in the low 32 bits of
   the i64 as prepared by the input hook -- confirm against the in2
   loader used for this insn.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
2782
2783 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2784 {
2785 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2786 return NO_EXIT;
2787 }
2788
/* LOAD NEGATIVE (extended BFP): set the sign bit in the high
   doubleword, pass the low doubleword through unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2795
2796 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2797 {
2798 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2799 potential_page_fault(s);
2800 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2801 tcg_temp_free_i32(l);
2802 set_cc_static(s);
2803 return NO_EXIT;
2804 }
2805
2806 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2807 {
2808 tcg_gen_neg_i64(o->out, o->in2);
2809 return NO_EXIT;
2810 }
2811
2812 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2813 {
2814 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2815 return NO_EXIT;
2816 }
2817
2818 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2819 {
2820 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2821 return NO_EXIT;
2822 }
2823
2824 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2825 {
2826 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2827 tcg_gen_mov_i64(o->out2, o->in2);
2828 return NO_EXIT;
2829 }
2830
2831 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2832 {
2833 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2834 potential_page_fault(s);
2835 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2836 tcg_temp_free_i32(l);
2837 set_cc_static(s);
2838 return NO_EXIT;
2839 }
2840
2841 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2842 {
2843 tcg_gen_or_i64(o->out, o->in1, o->in2);
2844 return NO_EXIT;
2845 }
2846
/* OR IMMEDIATE into a sub-field of the register.  insn->data encodes
   the field: low byte = bit shift, high bits = field size.  The
   immediate in in2 is shifted into position first; the CC reflects
   only the bits of the manipulated field.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 must be a free temp since we modify it in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2862
2863 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2864 {
2865 tcg_gen_bswap16_i64(o->out, o->in2);
2866 return NO_EXIT;
2867 }
2868
2869 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2870 {
2871 tcg_gen_bswap32_i64(o->out, o->in2);
2872 return NO_EXIT;
2873 }
2874
2875 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2876 {
2877 tcg_gen_bswap64_i64(o->out, o->in2);
2878 return NO_EXIT;
2879 }
2880
/* ROTATE LEFT (32-bit): perform the rotate in 32-bit TCG temps, then
   zero-extend the result back to the 64-bit output.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}
2895
2896 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2897 {
2898 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2899 return NO_EXIT;
2900 }
2901
2902 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2903 {
2904 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2905 return NO_EXIT;
2906 }
2907
2908 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2909 {
2910 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2911 return NO_EXIT;
2912 }
2913
2914 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2915 {
2916 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2917 return_low128(o->out2);
2918 return NO_EXIT;
2919 }
2920
2921 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2922 {
2923 gen_helper_sqeb(o->out, cpu_env, o->in2);
2924 return NO_EXIT;
2925 }
2926
2927 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2928 {
2929 gen_helper_sqdb(o->out, cpu_env, o->in2);
2930 return NO_EXIT;
2931 }
2932
2933 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2934 {
2935 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2936 return_low128(o->out2);
2937 return NO_EXIT;
2938 }
2939
2940 #ifndef CONFIG_USER_ONLY
/* SIGNAL PROCESSOR: inter-CPU signalling; order code and parameters
   are decoded by the helper, which also produces the CC.  Privileged.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2950 #endif
2951
/* SHIFT LEFT SINGLE (arithmetic).  insn->data is the sign-bit index
   (31 for the 32-bit form, 63 for the 64-bit form), which also selects
   the CC computation.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    /* CC must be computed from the pre-shift operands.  */
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
2965
2966 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
2967 {
2968 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2969 return NO_EXIT;
2970 }
2971
2972 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
2973 {
2974 tcg_gen_sar_i64(o->out, o->in1, o->in2);
2975 return NO_EXIT;
2976 }
2977
2978 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
2979 {
2980 tcg_gen_shr_i64(o->out, o->in1, o->in2);
2981 return NO_EXIT;
2982 }
2983
2984 #ifndef CONFIG_USER_ONLY
/* SET SYSTEM MASK: replace the top byte (bits 56-63 here, i.e. the
   system-mask byte) of the PSW mask with the low byte of in2.
   Privileged.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
2991
/* STORE CONTROL (64-bit): store control registers r1..r3 to the
   address in in2.  Privileged.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3003
/* STORE CONTROL (32-bit): as op_stctg but word-sized.  Privileged.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3015
/* STORE THEN {AND,OR} SYSTEM MASK (STNSM/STOSM).  The two insns share
   this generator; opcode 0xac (STNSM) ANDs the immediate into the
   system-mask byte, otherwise it is ORed.  Privileged.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into bits 0-7, keep the rest.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into bits 0-7.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3039 #endif
3040
3041 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3042 {
3043 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3044 return NO_EXIT;
3045 }
3046
3047 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3048 {
3049 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3050 return NO_EXIT;
3051 }
3052
3053 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3054 {
3055 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3056 return NO_EXIT;
3057 }
3058
3059 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3060 {
3061 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3062 return NO_EXIT;
3063 }
3064
/* STORE ACCESS MULTIPLE: store access registers r1..r3 to the address
   in in2 via helper.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3075
/* STORE CHARACTERS UNDER MASK: store the register bytes selected by
   mask M3 to successive bytes at the address in in2.  insn->data is
   the bit offset of the source word (cf. op_icm).  Contiguous masks
   become a single 8/16/32-bit store; anything else becomes a sequence
   of shift+byte-store operations.  No CC change.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the least-significant selected byte.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3124
/* STORE MULTIPLE: store registers r1..r3 (with wraparound) to
   successive locations at the address in in2.  insn->data selects the
   element size: 8 for the 64-bit form, otherwise 32-bit stores.  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3148
/* STORE MULTIPLE HIGH: store the high 32 bits of registers r1..r3
   (with wraparound) to successive words at the address in in2.  The
   qemu_st32 writes the low half of the temp, so shift the high half
   down first.  */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
3172
3173 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3174 {
3175 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3176 return NO_EXIT;
3177 }
3178
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, computed as
   in1 + ~in2 + carry.  The incoming carry/borrow is recovered from
   bit 1 of the current CC value (CC values 2/3 carry that bit set
   for the ADDU/SUBU CC ops this insn follows -- see the CC tables).  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    /* in2 is a free temp; complement it in place.  */
    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3196
/* SUPERVISOR CALL: record the SVC code and instruction length in env,
   then raise the SVC exception.  PSW address and CC must be synced
   first since the exception leaves the TB.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    /* The interrupt code needs the length of this insn.  */
    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3215
3216 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3217 {
3218 gen_helper_tceb(cc_op, o->in1, o->in2);
3219 set_cc_static(s);
3220 return NO_EXIT;
3221 }
3222
3223 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3224 {
3225 gen_helper_tcdb(cc_op, o->in1, o->in2);
3226 set_cc_static(s);
3227 return NO_EXIT;
3228 }
3229
/* TEST DATA CLASS (extended BFP): the 128-bit operand is in out:out2,
   the class mask in in2; CC from the helper.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3236
3237 #ifndef CONFIG_USER_ONLY
3238 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3239 {
3240 potential_page_fault(s);
3241 gen_helper_tprot(cc_op, o->addr1, o->in2);
3242 set_cc_static(s);
3243 return NO_EXIT;
3244 }
3245 #endif
3246
3247 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3248 {
3249 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3250 potential_page_fault(s);
3251 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3252 tcg_temp_free_i32(l);
3253 set_cc_static(s);
3254 return NO_EXIT;
3255 }
3256
3257 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3258 {
3259 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3260 potential_page_fault(s);
3261 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3262 tcg_temp_free_i32(l);
3263 return NO_EXIT;
3264 }
3265
3266 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3267 {
3268 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3269 potential_page_fault(s);
3270 gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
3271 tcg_temp_free_i32(l);
3272 set_cc_static(s);
3273 return NO_EXIT;
3274 }
3275
3276 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3277 {
3278 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3279 return NO_EXIT;
3280 }
3281
/* XOR IMMEDIATE into a sub-field of the register; same field encoding
   and CC handling as op_ori.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 must be a free temp since we modify it in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3297
/* LOAD ZERO: materialize the constant 0 as the output.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}
3303
/* LOAD ZERO (128-bit): both output halves are zero.  out2 aliases out;
   g_out2 is set so the generic cleanup does not free the same temp
   twice.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3311
3312 /* ====================================================================== */
3313 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3314 the original inputs), update the various cc data structures in order to
3315 be able to compute the new condition code. */
3316
3317 static void cout_abs32(DisasContext *s, DisasOps *o)
3318 {
3319 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3320 }
3321
3322 static void cout_abs64(DisasContext *s, DisasOps *o)
3323 {
3324 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3325 }
3326
3327 static void cout_adds32(DisasContext *s, DisasOps *o)
3328 {
3329 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3330 }
3331
3332 static void cout_adds64(DisasContext *s, DisasOps *o)
3333 {
3334 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3335 }
3336
3337 static void cout_addu32(DisasContext *s, DisasOps *o)
3338 {
3339 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3340 }
3341
3342 static void cout_addu64(DisasContext *s, DisasOps *o)
3343 {
3344 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3345 }
3346
3347 static void cout_addc32(DisasContext *s, DisasOps *o)
3348 {
3349 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3350 }
3351
3352 static void cout_addc64(DisasContext *s, DisasOps *o)
3353 {
3354 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3355 }
3356
3357 static void cout_cmps32(DisasContext *s, DisasOps *o)
3358 {
3359 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3360 }
3361
3362 static void cout_cmps64(DisasContext *s, DisasOps *o)
3363 {
3364 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3365 }
3366
3367 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3368 {
3369 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3370 }
3371
3372 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3373 {
3374 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3375 }
3376
3377 static void cout_f32(DisasContext *s, DisasOps *o)
3378 {
3379 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3380 }
3381
3382 static void cout_f64(DisasContext *s, DisasOps *o)
3383 {
3384 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3385 }
3386
3387 static void cout_f128(DisasContext *s, DisasOps *o)
3388 {
3389 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3390 }
3391
3392 static void cout_nabs32(DisasContext *s, DisasOps *o)
3393 {
3394 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3395 }
3396
3397 static void cout_nabs64(DisasContext *s, DisasOps *o)
3398 {
3399 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3400 }
3401
3402 static void cout_neg32(DisasContext *s, DisasOps *o)
3403 {
3404 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3405 }
3406
3407 static void cout_neg64(DisasContext *s, DisasOps *o)
3408 {
3409 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
3410 }
3411
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits participate; zero-extend before the
       generic non-zero test.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}
3417
3418 static void cout_nz64(DisasContext *s, DisasOps *o)
3419 {
3420 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3421 }
3422
3423 static void cout_s32(DisasContext *s, DisasOps *o)
3424 {
3425 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3426 }
3427
3428 static void cout_s64(DisasContext *s, DisasOps *o)
3429 {
3430 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3431 }
3432
3433 static void cout_subs32(DisasContext *s, DisasOps *o)
3434 {
3435 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3436 }
3437
3438 static void cout_subs64(DisasContext *s, DisasOps *o)
3439 {
3440 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3441 }
3442
3443 static void cout_subu32(DisasContext *s, DisasOps *o)
3444 {
3445 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3446 }
3447
3448 static void cout_subu64(DisasContext *s, DisasOps *o)
3449 {
3450 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3451 }
3452
3453 static void cout_subb32(DisasContext *s, DisasOps *o)
3454 {
3455 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3456 }
3457
3458 static void cout_subb64(DisasContext *s, DisasOps *o)
3459 {
3460 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3461 }
3462
3463 static void cout_tm32(DisasContext *s, DisasOps *o)
3464 {
3465 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3466 }
3467
3468 static void cout_tm64(DisasContext *s, DisasOps *o)
3469 {
3470 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3471 }
3472
3473 /* ====================================================================== */
3474 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3475 with the TCG register to which we will write. Used in combination with
3476 the "wout" generators, in some cases we need a new temporary, and in
3477 some cases we can write to a TCG global. */
3478
/* Output goes to a fresh temporary; a "wout" generator stores it.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

/* Output is a pair of fresh temporaries (for 128-bit results).  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}

/* Output goes directly to the R1 global; mark it so it isn't freed.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}

/* Output goes directly to the even/odd register pair R1:R1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}

/* Output goes directly to the F1 floating-point global.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}

/* Output goes directly to the extended FP register pair F1:F1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3519
3520 /* ====================================================================== */
3521 /* The "Write OUTput" generators. These generally perform some non-trivial
3522 copy of data to TCG globals, or to main memory. The trivial cases are
3523 generally handled by having a "prep" generator install the TCG global
3524 as the destination of the operation. */
3525
/* Store the full 64-bit result into R1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

/* Insert only the low 8 bits of the result into R1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

/* Insert only the low 16 bits of the result into R1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}

/* Store the low 32 bits of the result into R1, preserving the high half.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}

/* Store the 32-bit pair OUT/OUT2 into the even/odd pair R1:R1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

/* Split the 64-bit result across the pair: low half to R1+1, high to R1.
   Note this clobbers OUT with the shifted value.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

/* Store a short (32-bit) FP result into F1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}

/* Store a long (64-bit) FP result into F1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
3574
3575 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3576 {
3577 /* ??? Specification exception: r1 must be < 14. */
3578 int f1 = get_field(s->fields, r1);
3579 store_freg(f1, o->out);
3580 store_freg((f1 + 2) & 15, o->out2);
3581 }
3582
/* Store the 32-bit result into R1, but only when R1 != R2
   (a store would be a no-op copy onto itself otherwise).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

/* Store the short FP result into F1, but only when R1 != R2.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}

/* Store 1 byte of the result to the first-operand address.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

/* Store 2 bytes of the result to the first-operand address.  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

/* Store 4 bytes of the result to the first-operand address.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

/* Store 8 bytes of the result to the first-operand address.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}

/* Store 4 bytes of the result to the address computed into IN2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3621
3622 /* ====================================================================== */
3623 /* The "INput 1" generators. These load the first operand to an insn. */
3624
/* IN1 = a copy of R1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

/* IN1 = the R1 global itself; marked so it is not freed or clobbered.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

/* IN1 = R1 sign-extended from 32 bits.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

/* IN1 = R1 zero-extended from 32 bits.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

/* IN1 = the high 32 bits of R1.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}

/* IN1 = a copy of R1+1 (the odd register of the pair).  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* IN1 = R1+1 sign-extended from 32 bits.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* IN1 = R1+1 zero-extended from 32 bits.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}
3676
3677 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3678 {
3679 /* ??? Specification exception: r1 must be even. */
3680 int r1 = get_field(f, r1);
3681 o->in1 = tcg_temp_new_i64();
3682 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3683 }
3684
/* IN1 = a copy of R2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* IN1 = a copy of R3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

/* IN1 = the R3 global itself; marked so it is not freed.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

/* IN1 = R3 sign-extended from 32 bits.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

/* IN1 = R3 zero-extended from 32 bits.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

/* IN1 = the short (32-bit) FP value from F1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

/* IN1 = the F1 FP global itself; marked so it is not freed.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
3723
/* Despite living in the "in1" slot, this installs the extended FP pair
   F1:F1+2 into the OUT/OUT2 fields (same body as prep_x1).
   NOTE(review): this appears deliberate — it lets an f128 op modify the
   register pair in place and lets cout_f128 read OUT/OUT2 — but confirm
   against the insn-data.def users before "fixing" it to set IN1/IN2.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3732
/* ADDR1 = the first-operand effective address from B1/D1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

/* ADDR1 = the effective address from the second-operand fields
   (X2/B2/D2); X2 may be absent from the format.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* IN1 = 1 byte loaded zero-extended from the first-operand address.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

/* IN1 = 2 bytes loaded sign-extended from the first-operand address.  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

/* IN1 = 2 bytes loaded zero-extended from the first-operand address.  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

/* IN1 = 4 bytes loaded sign-extended from the first-operand address.  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

/* IN1 = 4 bytes loaded zero-extended from the first-operand address.  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

/* IN1 = 8 bytes loaded from the first-operand address.  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3785
3786 /* ====================================================================== */
3787 /* The "INput 2" generators. These load the second operand to an insn. */
3788
/* IN2 = the R1 global itself; marked so it is not freed.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}

/* IN2 = R1 zero-extended from 16 bits.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}

/* IN2 = R1 zero-extended from 32 bits.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}

/* IN2 = a copy of R2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}

/* IN2 = the R2 global itself; marked so it is not freed.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

/* IN2 = a copy of R2, but only when R2 != 0; otherwise IN2 stays unset
   (callers treat register 0 as "no operand").  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}

/* IN2 = R2 sign-extended from 8 bits.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = R2 zero-extended from 8 bits.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = R2 sign-extended from 16 bits.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = R2 zero-extended from 16 bits.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
3849
/* IN2 = a copy of R3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

/* IN2 = R2 sign-extended from 32 bits.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = R2 zero-extended from 32 bits.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = the short (32-bit) FP value from F2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}

/* IN2 = the F2 FP global itself; marked so it is not freed.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

/* IN1/IN2 = the extended FP register pair F2:F2+2 (128-bit operand).  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r2 must be < 14. */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[(r2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}
3886
/* IN2 = the second-operand effective address from X2/B2/D2;
   X2 may be absent from the format.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* IN2 = the PC-relative target address: PC + I2 * 2 (I2 counts
   halfwords).  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

/* IN2 = shift count masked to 0..31.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

/* IN2 = shift count masked to 0..63.  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
3907
/* The in2_m2_* loaders compute the address into IN2 via in2_a2 and then
   load through it in place, so IN2 ends up holding the memory value.  */

/* IN2 = 1 byte, zero-extended, from the second-operand address.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = 2 bytes, sign-extended.  */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = 2 bytes, zero-extended.  */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = 4 bytes, sign-extended.  */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = 4 bytes, zero-extended.  */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = 8 bytes.  */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* The in2_mri2_* loaders do the same through a PC-relative address
   (in2_ri2), for the RIL "load relative" style insns.  */

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
3967
/* IN2 = the immediate I2 as extracted (sign/zero handling done by the
   field extractor).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

/* IN2 = I2 truncated/zero-extended to 8 bits.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

/* IN2 = I2 truncated/zero-extended to 16 bits.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

/* IN2 = I2 truncated/zero-extended to 32 bits.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

/* IN2 = 16-bit unsigned I2 shifted left by the per-insn DATA amount
   (used by the IILH/IIHH style insert-immediate insns).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

/* IN2 = 32-bit unsigned I2 shifted left by the per-insn DATA amount.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
3999
4000 /* ====================================================================== */
4001
4002 /* Find opc within the table of insns. This is formulated as a switch
4003 statement so that (1) we get compile-time notice of cut-paste errors
4004 for duplicated opcodes, and (2) the compiler generates the binary
4005 search tree, rather than us having to post-process the table. */
4006
4007 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4008 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4009
4010 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4011
4012 enum DisasInsnEnum {
4013 #include "insn-data.def"
4014 };
4015
4016 #undef D
4017 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4018 .opc = OPC, \
4019 .fmt = FMT_##FT, \
4020 .fac = FAC_##FC, \
4021 .name = #NM, \
4022 .help_in1 = in1_##I1, \
4023 .help_in2 = in2_##I2, \
4024 .help_prep = prep_##P, \
4025 .help_wout = wout_##W, \
4026 .help_cout = cout_##CC, \
4027 .help_op = op_##OP, \
4028 .data = D \
4029 },
4030
4031 /* Allow 0 to be used for NULL in the table below. */
4032 #define in1_0 NULL
4033 #define in2_0 NULL
4034 #define prep_0 NULL
4035 #define wout_0 NULL
4036 #define cout_0 NULL
4037 #define op_0 NULL
4038
4039 static const DisasInsn insn_info[] = {
4040 #include "insn-data.def"
4041 };
4042
4043 #undef D
4044 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4045 case OPC: return &insn_info[insn_ ## NM];
4046
/* Map a 16-bit (major << 8 | minor) opcode to its DisasInsn entry, or
   NULL if unimplemented.  The D macro above expands each table row into
   a "case OPC: return ..." so the compiler builds the search tree.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}
4055
4056 #undef D
4057 #undef C
4058
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
4062
/* Extract one operand field described by F from the left-aligned INSN
   word and record it in O, tracking which fields are present.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-size field marks an unused slot in the format table.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Classic sign-extension trick: xor with the sign bit, then
           subtract it back out.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The 20-bit displacement is stored low-part-first in the insn;
           reassemble as (dh << 12) | dl with dh sign-extended.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4098
4099 /* Lookup the insn at the current PC, extracting the operands into O and
4100 returning the info struct for the insn. Returns NULL for invalid insn. */
4101
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for an invalid
   or unconverted insn.  Also sets s->next_pc from the insn length.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The top byte of the first halfword encodes the insn length.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the 64-bit word, as extract_field
       expects (big-bit-endian field offsets).  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Second opcode byte immediately follows the first.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Second opcode is the 4-bit nibble after the R1 field.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* SS formats have no secondary opcode.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4187
/* Translate a single insn at s->pc: decode it, run the in/prep/op/wout/
   cout generator pipeline, free temporaries, and advance the PC.
   Returns the exit status for the translation loop.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter.  This includes ILLOPC. */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy DISAS_* result onto the new ExitStatus.  */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; order is
       fixed: inputs, output prep, the operation, writeback, then CC. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       TCG globals installed by the *_o/prep generators, which must not
       be freed. */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4272
/* Translate a basic block starting at tb->pc into TCG ops.  With
   SEARCH_PC set, also record the per-op PC/CC bookkeeping used by
   restore_state_to_opc for exception unwinding.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Pad the opc->insn map up to the current op index, then
               record this insn's PC, cc_op and icount.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this PC ends the TB before the insn and
           raises EXCP_DEBUG at exit.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the tail of the opc->insn map.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4411
/* Public entry point: translate a TB without PC search info.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

/* Public entry point: translate a TB recording PC search info, used
   when restoring CPU state at a faulting insn.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4421
/* Restore the CPU state to the insn at opc index PC_POS, using the
   bookkeeping recorded by gen_intermediate_code_pc.  */
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
    /* Only a concrete cc_op is worth writing back; DYNAMIC/STATIC mean
       env->cc_op is already authoritative.  */
    cc_op = gen_opc_cc_op[pc_pos];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}