]> git.proxmox.com Git - qemu.git/blob - target-s390x/translate.c
target-s390: Convert COMPARE AND SWAP
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
/* Per-translation-block state threaded through the disassembler. */
struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;      /* decode-table entry for the current insn */
    DisasFields *fields;        /* decoded operand fields of the current insn */
    uint64_t pc, next_pc;       /* guest address of current and next insn */
    enum cc_op cc_op;           /* how the condition code is (lazily) computed */
    bool singlestep_enabled;
    int is_jmp;                 /* DISAS_* status controlling TB termination */
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;             /* TCG comparison to perform */
    bool is_64;                 /* true: use u.s64 operands, else u.s32 */
    bool g1;                    /* operand a is a global; do not free it */
    bool g2;                    /* operand b is a global; do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
72
73 #define DISAS_EXCP 4
74
75 static void gen_op_calc_cc(DisasContext *s);
76
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
80 #endif
81
/* Log the raw instruction word; a no-op unless verbose disas logging
   (S390X_DEBUG_DISAS_VERBOSE) is enabled. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
86
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
99 {
100 int i;
101
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
108 }
109
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
116 }
117 }
118
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
125 }
126 }
127
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
135 }
136 }
137 #endif
138
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
143 }
144 #endif
145
146 cpu_fprintf(f, "\n");
147 }
148
149 static TCGv_i64 psw_addr;
150 static TCGv_i64 psw_mask;
151
152 static TCGv_i32 cc_op;
153 static TCGv_i64 cc_src;
154 static TCGv_i64 cc_dst;
155 static TCGv_i64 cc_vr;
156
157 static char cpu_reg_names[32][4];
158 static TCGv_i64 regs[16];
159 static TCGv_i64 fregs[16];
160
161 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
/* Create the TCG globals that mirror guest CPU state in CPUS390XState:
   the PSW, the lazy condition-code operands, 16 GPRs and 16 FPRs.
   Called once during target initialization. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* Slots 0..15 of cpu_reg_names hold "r0".."r15". */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* Slots 16..31 hold "f0".."f15" for the FP registers. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
202
/* Return a fresh temp holding the full 64-bit value of GPR REG. */
static inline TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temp holding the full 64-bit value of FPR REG. */
static inline TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, fregs[reg]);
    return r;
}

/* Return a fresh 32-bit temp holding the short (high-half) part of FPR REG.
   On 32-bit hosts the high half of the i64 global is accessed directly. */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    /* Reuse the i32 temp as an i64 and shift the high half down. */
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}

/* Return a fresh i64 temp holding the short part of FPR REG (zero-extended,
   taken from bits 32..63). */
static inline TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

/* Return a fresh i32 temp holding the low 32 bits of GPR REG. */
static inline TCGv_i32 load_reg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(r, regs[reg]);
    return r;
}

/* Return a fresh i64 temp holding the low 32 bits of GPR REG,
   sign-extended to 64 bits. */
static inline TCGv_i64 load_reg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(r, regs[reg]);
    return r;
}
248
/* Store the full 64-bit value V into GPR REG. */
static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store the full 64-bit value V into FPR REG. */
static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

/* 32 bit register writes keep the upper half */
static inline void store_reg32(int reg, TCGv_i32 v)
{
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    /* Deposit the i32 value (viewed as i64) into bits 0..31. */
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}

/* 32 bit register writes keep the upper half */
static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store V into the high 32 bits of GPR REG, keeping the low half. */
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* 16 bit register writes keep the upper bytes */
static inline void store_reg16(int reg, TCGv_i32 v)
{
#if HOST_LONG_BITS == 32
    tcg_gen_deposit_i32(TCGV_LOW(regs[reg]), TCGV_LOW(regs[reg]), v, 0, 16);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 16);
#endif
}

/* 32 bit register writes keep the lower half; the short FP value lives
   in bits 32..63 of the i64 FPR global. */
static inline void store_freg32(int reg, TCGv_i32 v)
{
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}

/* i64 variant of store_freg32: deposit V into bits 32..63 of FPR REG. */
static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
307
/* Fetch the low 64 bits of a 128-bit helper result from env->retxl. */
static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

/* Synchronize the architectural PSW address with the translation PC. */
static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Before an operation that may fault (system emulation only), make the
   PSW address and condition code architecturally up to date so the
   exception handler sees consistent state. */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}

/* Fetch 2 bytes of instruction text at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch 4 bytes of instruction text at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch a 6-byte instruction at PC: the first halfword goes in bits
   32..47, the remaining 4 bytes in bits 0..31. */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
341
342 static inline int get_mem_index(DisasContext *s)
343 {
344 switch (s->tb->flags & FLAG_MASK_ASC) {
345 case PSW_ASC_PRIMARY >> 32:
346 return 0;
347 case PSW_ASC_SECONDARY >> 32:
348 return 1;
349 case PSW_ASC_HOME >> 32:
350 return 2;
351 default:
352 tcg_abort();
353 break;
354 }
355 }
356
/* Emit a call to the exception helper with exception number EXCP. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

/* Raise program exception CODE: record the code and instruction length
   in env, advance the PSW past the instruction, flush the lazy CC, and
   end the TB with an EXCP_PGM exception. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exeption this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    gen_op_calc_cc(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);

    /* End TB here. */
    s->is_jmp = DISAS_EXCP;
}

/* Raise the program exception used for unimplemented/illegal opcodes.
   NOTE(review): this raises a specification exception; z/Architecture
   defines an *operation* exception for invalid opcodes — confirm which
   is intended here. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

/* If the TB was translated in problem state, raise a privileged-
   operation exception. */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
402
/* Compute the effective address base(B2) + index(X2) + displacement D2
   into a fresh temp, applying 31-bit address-mode masking when the TB
   is not in 64-bit mode.  Register number 0 means "no register". */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        /* Neither base nor index: the displacement is the address. */
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
440
/* Set the condition code to the constant VAL (0..3) by selecting one of
   the CC_OP_CONSTx pseudo-ops; no TCG code is emitted here. */
static void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}

/* Lazy-CC update, one 64-bit operand: only cc_dst is meaningful. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Lazy-CC update, one 32-bit operand (zero-extended into cc_dst). */
static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Lazy-CC update, two 64-bit operands in cc_src/cc_dst. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Lazy-CC update, two 32-bit operands (zero-extended). */
static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Lazy-CC update, three 64-bit operands in cc_src/cc_dst/cc_vr. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* CC := (VAL != 0), 32-bit logical result. */
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}

/* CC := (VAL != 0), 64-bit logical result. */
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
498
/* Record a 32-bit comparison of V1 and V2 under the given lazy-CC op. */
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}

/* Record a 64-bit comparison of V1 and V2 under the given lazy-CC op. */
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}

/* Signed 32-bit compare. */
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}

/* Unsigned 32-bit compare. */
static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}

/* Signed 32-bit compare against immediate V2. */
static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
{
    /* XXX optimize for the constant? put it in s? */
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
    tcg_temp_free_i32(tmp);
}

/* Unsigned 32-bit compare against immediate V2. */
static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
{
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
    tcg_temp_free_i32(tmp);
}

/* Signed 64-bit compare. */
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}

/* Unsigned 64-bit compare. */
static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}

/* Signed 64-bit compare against immediate V2. */
static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_s64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* Unsigned 64-bit compare against immediate V2. */
static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_u64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* CC from a signed 32-bit result compared against zero. */
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}

/* CC from a signed 64-bit result compared against zero. */
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}
569
/* Record a float32 (in V1) vs float64-register-image (in V2) compare
   for lazy CC evaluation. */
static void set_cc_cmp_f32_i64(DisasContext *s, TCGv_i32 v1, TCGv_i64 v2)
{
    tcg_gen_extu_i32_i64(cc_src, v1);
    tcg_gen_mov_i64(cc_dst, v2);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_LTGT_F32;
}

/* CC from a float32 result compared against zero/NaN classes. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i32 v1)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ_F32, v1);
}

/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}

/* Materialize the symbolic cc_op into the cc_op global, unless it is
   already dynamic/static (i.e. already lives in env). */
static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Alias for gen_op_set_cc_op; kept for call-site readability. */
static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
603
/* calculates cc into cc_op */
/* Resolve the lazy condition code: depending on how many operands the
   current cc_op needs (0, 1, 2, or 3), call the calc_cc helper with the
   appropriate subset of cc_src/cc_dst/cc_vr, then mark the CC static. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_LTGT_F32:
    case CC_OP_LTGT_F64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
678
/* Decode an RR-format insn: R1 in bits 4..7, R2 in bits 0..3. */
static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}

/* Decode an RX-format insn (R1, X2, B2, D2) and return the computed
   effective address; caller frees the returned temp. */
static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;

    return get_address(s, *x2, *b2, *d2);
}

/* Decode an RS-format insn: R1, R3 (or mask M3), B2, D2. */
static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}

/* Decode an SI-format insn (immediate I2, base B1, displacement D1) and
   return the computed effective address; caller frees the temp. */
static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}
723
724 static int use_goto_tb(DisasContext *s, uint64_t dest)
725 {
726 /* NOTE: we handle the case where the TB spans two pages here */
727 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
728 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
729 && !s->singlestep_enabled
730 && !(s->tb->cflags & CF_LAST_IO));
731 }
732
/* Emit a jump to guest address PC, using a chained goto_tb (slot TB_NUM)
   when permitted, otherwise a plain indirect TB exit. */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    /* Make the symbolic cc_op visible in env before leaving the TB. */
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}
747
/* Bump the "could not inline this branch" counter (debug builds only). */
static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Bump the "branch was inlined" counter (debug builds only). */
static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.
   Indexed by the 4-bit branch mask (bit 3 = CC0 ... bit 0 = CC3). */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
788
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Two phases: first pick
   the TCG condition implied by (cc_op, mask), falling back to computing
   the CC into env when the pair cannot be inlined; then load the operand
   pair the condition compares. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask 15 = always taken, mask 0 = never taken; no operands needed
       (cc_op globals are used as dummies, marked g1/g2 so nothing is
       freed by free_compare). */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table, but the comparisons become unsigned. */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: only "all zero" / "not all zero" inline. */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
        /* cc_dst is a global (g1); only the zero constant is freed. */
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The CC value already lives in the cc_op global; compare it
           (or a derived value) against a constant per the mask. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
996
997 static void free_compare(DisasCompare *c)
998 {
999 if (!c->g1) {
1000 if (c->is_64) {
1001 tcg_temp_free_i64(c->u.s64.a);
1002 } else {
1003 tcg_temp_free_i32(c->u.s32.a);
1004 }
1005 }
1006 if (!c->g2) {
1007 if (c->is_64) {
1008 tcg_temp_free_i64(c->u.s64.b);
1009 } else {
1010 tcg_temp_free_i32(c->u.s32.b);
1011 }
1012 }
1013 }
1014
/* Translate the remaining 0xe3-prefixed (RXY-format) opcodes that have
   not yet been converted to the table-based decoder: byte-reversed
   loads/stores and LOAD LOGICAL THIRTY ONE BITS. */
static void disas_e3(CPUS390XState *env, DisasContext* s, int op, int r1,
                     int x2, int b2, int d2)
{
    TCGv_i64 addr, tmp2;
    TCGv_i32 tmp32_1;

    LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
              op, r1, x2, b2, d2);
    addr = get_address(s, x2, b2, d2);
    switch (op) {
    case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
        /* 64-bit load, byte-reversed. */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
        tcg_gen_bswap64_i64(tmp2, tmp2);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
        /* Load 32 bits and clear bit 32 (keep only a 31-bit address). */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_andi_i64(tmp2, tmp2, 0x7fffffffULL);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
        /* 32-bit load, byte-reversed; upper register half preserved. */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
        /* 16-bit load, byte-reversed; upper register bytes preserved. */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap16_i32(tmp32_1, tmp32_1);
        store_reg16(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
        /* 32-bit store, byte-reversed. */
        tmp32_1 = load_reg32(r1);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        break;
    default:
        LOG_DISAS("illegal e3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
    tcg_temp_free_i64(addr);
}
1075
#ifndef CONFIG_USER_ONLY
/* Translate 0xe5-prefixed (SSE-format) opcodes; currently only TPROT.
   Both storage operands are base+displacement addresses. */
static void disas_e5(CPUS390XState *env, DisasContext* s, uint64_t insn)
{
    TCGv_i64 tmp, tmp2;
    int op = (insn >> 32) & 0xff;

    tmp = get_address(s, 0, (insn >> 28) & 0xf, (insn >> 16) & 0xfff);
    tmp2 = get_address(s, 0, (insn >> 12) & 0xf, insn & 0xfff);

    LOG_DISAS("disas_e5: insn %" PRIx64 "\n", insn);
    switch (op) {
    case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
        /* Test Protection */
        potential_page_fault(s);
        gen_helper_tprot(cc_op, tmp, tmp2);
        set_cc_static(s);
        break;
    default:
        LOG_DISAS("illegal e5 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(tmp2);
}
#endif
1103
/* Translate the remaining 0xeb-prefixed (RSY/RSE-format) opcodes:
   STCMH, and the privileged control-register load/store. */
static void disas_eb(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int r3, int b2, int d2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2;

    LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
              op, r1, r3, b2, d2);
    switch (op) {
    case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
        /* Store Characters under Mask High via helper. */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stcmh(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
#ifndef CONFIG_USER_ONLY
    case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
        /* Load Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_lctlg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
        /* Store Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stctg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
#endif
    default:
        LOG_DISAS("illegal eb operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
1155
1156 static void disas_ed(CPUS390XState *env, DisasContext *s, int op, int r1,
1157 int x2, int b2, int d2, int r1b)
1158 {
1159 TCGv_i32 tmp_r1, tmp32;
1160 TCGv_i64 addr, tmp;
1161 addr = get_address(s, x2, b2, d2);
1162 tmp_r1 = tcg_const_i32(r1);
1163 switch (op) {
1164 case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
1165 potential_page_fault(s);
1166 gen_helper_ldeb(cpu_env, tmp_r1, addr);
1167 break;
1168 case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
1169 potential_page_fault(s);
1170 gen_helper_lxdb(cpu_env, tmp_r1, addr);
1171 break;
1172 case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
1173 tmp = tcg_temp_new_i64();
1174 tmp32 = load_freg32(r1);
1175 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1176 set_cc_cmp_f32_i64(s, tmp32, tmp);
1177 tcg_temp_free_i64(tmp);
1178 tcg_temp_free_i32(tmp32);
1179 break;
1180 case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
1181 tmp = tcg_temp_new_i64();
1182 tmp32 = tcg_temp_new_i32();
1183 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1184 tcg_gen_trunc_i64_i32(tmp32, tmp);
1185 gen_helper_aeb(cpu_env, tmp_r1, tmp32);
1186 tcg_temp_free_i64(tmp);
1187 tcg_temp_free_i32(tmp32);
1188
1189 tmp32 = load_freg32(r1);
1190 gen_set_cc_nz_f32(s, tmp32);
1191 tcg_temp_free_i32(tmp32);
1192 break;
1193 case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
1194 tmp = tcg_temp_new_i64();
1195 tmp32 = tcg_temp_new_i32();
1196 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1197 tcg_gen_trunc_i64_i32(tmp32, tmp);
1198 gen_helper_seb(cpu_env, tmp_r1, tmp32);
1199 tcg_temp_free_i64(tmp);
1200 tcg_temp_free_i32(tmp32);
1201
1202 tmp32 = load_freg32(r1);
1203 gen_set_cc_nz_f32(s, tmp32);
1204 tcg_temp_free_i32(tmp32);
1205 break;
1206 case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
1207 tmp = tcg_temp_new_i64();
1208 tmp32 = tcg_temp_new_i32();
1209 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1210 tcg_gen_trunc_i64_i32(tmp32, tmp);
1211 gen_helper_deb(cpu_env, tmp_r1, tmp32);
1212 tcg_temp_free_i64(tmp);
1213 tcg_temp_free_i32(tmp32);
1214 break;
1215 case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
1216 potential_page_fault(s);
1217 gen_helper_tceb(cc_op, cpu_env, tmp_r1, addr);
1218 set_cc_static(s);
1219 break;
1220 case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
1221 potential_page_fault(s);
1222 gen_helper_tcdb(cc_op, cpu_env, tmp_r1, addr);
1223 set_cc_static(s);
1224 break;
1225 case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
1226 potential_page_fault(s);
1227 gen_helper_tcxb(cc_op, cpu_env, tmp_r1, addr);
1228 set_cc_static(s);
1229 break;
1230 case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
1231 tmp = tcg_temp_new_i64();
1232 tmp32 = tcg_temp_new_i32();
1233 tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
1234 tcg_gen_trunc_i64_i32(tmp32, tmp);
1235 gen_helper_meeb(cpu_env, tmp_r1, tmp32);
1236 tcg_temp_free_i64(tmp);
1237 tcg_temp_free_i32(tmp32);
1238 break;
1239 case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
1240 potential_page_fault(s);
1241 gen_helper_cdb(cc_op, cpu_env, tmp_r1, addr);
1242 set_cc_static(s);
1243 break;
1244 case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
1245 potential_page_fault(s);
1246 gen_helper_adb(cc_op, cpu_env, tmp_r1, addr);
1247 set_cc_static(s);
1248 break;
1249 case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
1250 potential_page_fault(s);
1251 gen_helper_sdb(cc_op, cpu_env, tmp_r1, addr);
1252 set_cc_static(s);
1253 break;
1254 case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
1255 potential_page_fault(s);
1256 gen_helper_mdb(cpu_env, tmp_r1, addr);
1257 break;
1258 case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
1259 potential_page_fault(s);
1260 gen_helper_ddb(cpu_env, tmp_r1, addr);
1261 break;
1262 case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
1263 /* for RXF insns, r1 is R3 and r1b is R1 */
1264 tmp32 = tcg_const_i32(r1b);
1265 potential_page_fault(s);
1266 gen_helper_madb(cpu_env, tmp32, addr, tmp_r1);
1267 tcg_temp_free_i32(tmp32);
1268 break;
1269 default:
1270 LOG_DISAS("illegal ed operation 0x%x\n", op);
1271 gen_illegal_opcode(s);
1272 return;
1273 }
1274 tcg_temp_free_i32(tmp_r1);
1275 tcg_temp_free_i64(addr);
1276 }
1277
/* Disassemble one insn with major opcode 0xB2.  The cases handled
   inline here are mostly RRE (register-register) ops; the system-only
   S-format cases under !CONFIG_USER_ONLY re-decode their own
   base/displacement via decode_rs.  */
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
                     uint32_t insn)
{
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    int r1, r2;
#ifndef CONFIG_USER_ONLY
    int r3, d2, b2;
#endif

    r1 = (insn >> 4) & 0xf;
    r2 = insn & 0xf;

    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);

    switch (op) {
    case 0x22: /* IPM R1 [RRE] */
        /* Insert Program Mask: the cc must be fully materialized
           before the helper can copy it into R1.  */
        tmp32_1 = tcg_const_i32(r1);
        gen_op_calc_cc(s);
        gen_helper_ipm(cpu_env, cc_op, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x41: /* CKSM R1,R2 [RRE] */
        /* Checksum; helper leaves cc 0 semantics, set statically.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        gen_op_movi_cc(s, 0);
        break;
    case 0x4e: /* SAR R1,R2 [RRE] */
        /* Set Access register A(r1) from general register R2.  */
        tmp32_1 = load_reg32(r2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x4f: /* EAR R1,R2 [RRE] */
        /* Extract Access register A(r2) into general register R1.  */
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x54: /* MVPG R1,R2 [RRE] */
        /* Move Page; R0 carries the flags operand.  */
        tmp = load_reg(0);
        tmp2 = load_reg(r1);
        tmp3 = load_reg(r2);
        potential_page_fault(s);
        gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        /* XXX check CCO bit and set CC accordingly */
        gen_op_movi_cc(s, 0);
        break;
    case 0x55: /* MVST R1,R2 [RRE] */
        /* Move String; R0 holds the terminating character.  */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        gen_op_movi_cc(s, 1);
        break;
    case 0x5d: /* CLST R1,R2 [RRE] */
        /* Compare Logical String; R0 holds the terminator.  */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x5e: /* SRST R1,R2 [RRE] */
        /* Search String; R0 holds the character searched for.  */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;

#ifndef CONFIG_USER_ONLY
    case 0x02: /* STIDP D2(B2) [S] */
        /* Store CPU ID */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stidp(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x04: /* SCK D2(B2) [S] */
        /* Set Clock */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sck(cc_op, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x05: /* STCK D2(B2) [S] */
        /* Store Clock -- note: not privileged.  */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stck(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x06: /* SCKC D2(B2) [S] */
        /* Set Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x07: /* STCKC D2(B2) [S] */
        /* Store Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x08: /* SPT D2(B2) [S] */
        /* Set CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x09: /* STPT D2(B2) [S] */
        /* Store CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stpt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0a: /* SPKA D2(B2) [S] */
        /* Set PSW Key from Address: shift the relevant bits of the
           second-operand address into the PSW key field, preserving
           the rest of the PSW mask.  */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
        tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
        tcg_gen_or_i64(psw_mask, tmp2, tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0d: /* PTLB [S] */
        /* Purge TLB */
        check_privileged(s);
        gen_helper_ptlb(cpu_env);
        break;
    case 0x10: /* SPX D2(B2) [S] */
        /* Set Prefix Register */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spx(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x11: /* STPX D2(B2) [S] */
        /* Store Prefix: 32-bit store of env->psa.  */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x12: /* STAP D2(B2) [S] */
        /* Store CPU Address: 32-bit store of env->cpu_num.  */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x21: /* IPTE R1,R2 [RRE] */
        /* Invalidate PTE */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r1);
        tmp2 = load_reg(r2);
        gen_helper_ipte(cpu_env, tmp, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x29: /* ISKE R1,R2 [RRE] */
        /* Insert Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r2);
        tmp2 = tcg_temp_new_i64();
        gen_helper_iske(tmp2, cpu_env, tmp);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Reset Reference Bit Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x2b: /* SSKE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_sske(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x34: /* STCH ? */
        /* Store Subchannel -- not implemented, just report cc 3.  */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call -- not implemented, just cc 3.  */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x78: /* STCKE D2(B2) [S] */
        /* Store Clock Extended -- note: not privileged.  */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x79: /* SACF D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->pc = s->next_pc;
        update_psw_addr(s);
        s->is_jmp = DISAS_JUMP;
        break;
    case 0x7d: /* STSI D2,(B2) [S] */
        /* Store System Information; function code in R0, selectors
           in R0/R1.  */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x9d: /* LFPC D2(B2) [S] */
        /* Load the FP control register from storage.  */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0xb1: /* STFL D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        /* Hard-coded facility bits 0xc0000000 stored at absolute
           address 200; the D2(B2) operand is ignored here.  */
        check_privileged(s);
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0xb2: /* LPSWE D2(B2) [S] */
        /* Load PSW Extended: 16-byte operand, mask then address.  */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        break;
    case 0x20: /* SERVC R1,R2 [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        tmp = load_reg(r1);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
#endif
    default:
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
1653
/* Disassemble one insn with major opcode 0xB3: binary floating point
   register-register ops (RRE), plus a few RRF multiply-and-add forms.
   For RRF insns, m3 is R1, r1 is R3, and r2 is R2 (see the cases).  */
static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
                     int r1, int r2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
/* Call gen_helper_<i>(env, r1, r2) for a cc-less FP helper.  */
#define FP_HELPER(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

/* As FP_HELPER, for helpers that also produce the condition code.  */
#define FP_HELPER_CC(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
    set_cc_static(s); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

    switch (op) {
    case 0x0: /* LPEBR R1,R2 [RRE] */
        FP_HELPER_CC(lpebr);
        break;
    case 0x2: /* LTEBR R1,R2 [RRE] */
        FP_HELPER_CC(ltebr);
        break;
    case 0x3: /* LCEBR R1,R2 [RRE] */
        FP_HELPER_CC(lcebr);
        break;
    case 0x4: /* LDEBR R1,R2 [RRE] */
        FP_HELPER(ldebr);
        break;
    case 0x5: /* LXDBR R1,R2 [RRE] */
        FP_HELPER(lxdbr);
        break;
    case 0x9: /* CEBR R1,R2 [RRE] */
        FP_HELPER_CC(cebr);
        break;
    case 0xa: /* AEBR R1,R2 [RRE] */
        FP_HELPER_CC(aebr);
        break;
    case 0xb: /* SEBR R1,R2 [RRE] */
        FP_HELPER_CC(sebr);
        break;
    case 0xd: /* DEBR R1,R2 [RRE] */
        FP_HELPER(debr);
        break;
    case 0x10: /* LPDBR R1,R2 [RRE] */
        FP_HELPER_CC(lpdbr);
        break;
    case 0x12: /* LTDBR R1,R2 [RRE] */
        FP_HELPER_CC(ltdbr);
        break;
    case 0x13: /* LCDBR R1,R2 [RRE] */
        FP_HELPER_CC(lcdbr);
        break;
    case 0x15: /* SQDBR R1,R2 [RRE] */
        FP_HELPER(sqdbr);
        break;
    case 0x17: /* MEEBR R1,R2 [RRE] */
        FP_HELPER(meebr);
        break;
    case 0x19: /* CDBR R1,R2 [RRE] */
        FP_HELPER_CC(cdbr);
        break;
    case 0x1a: /* ADBR R1,R2 [RRE] */
        FP_HELPER_CC(adbr);
        break;
    case 0x1b: /* SDBR R1,R2 [RRE] */
        FP_HELPER_CC(sdbr);
        break;
    case 0x1c: /* MDBR R1,R2 [RRE] */
        FP_HELPER(mdbr);
        break;
    case 0x1d: /* DDBR R1,R2 [RRE] */
        FP_HELPER(ddbr);
        break;
    case 0xe: /* MAEBR R1,R3,R2 [RRF] */
    case 0x1e: /* MADBR R1,R3,R2 [RRF] */
    case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
        /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
        tmp32_1 = tcg_const_i32(m3);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(r1);
        switch (op) {
        case 0xe:
            gen_helper_maebr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        case 0x1e:
            gen_helper_madbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        case 0x1f:
            gen_helper_msdbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x40: /* LPXBR R1,R2 [RRE] */
        FP_HELPER_CC(lpxbr);
        break;
    case 0x42: /* LTXBR R1,R2 [RRE] */
        FP_HELPER_CC(ltxbr);
        break;
    case 0x43: /* LCXBR R1,R2 [RRE] */
        FP_HELPER_CC(lcxbr);
        break;
    case 0x44: /* LEDBR R1,R2 [RRE] */
        FP_HELPER(ledbr);
        break;
    case 0x45: /* LDXBR R1,R2 [RRE] */
        FP_HELPER(ldxbr);
        break;
    case 0x46: /* LEXBR R1,R2 [RRE] */
        FP_HELPER(lexbr);
        break;
    case 0x49: /* CXBR R1,R2 [RRE] */
        FP_HELPER_CC(cxbr);
        break;
    case 0x4a: /* AXBR R1,R2 [RRE] */
        FP_HELPER_CC(axbr);
        break;
    case 0x4b: /* SXBR R1,R2 [RRE] */
        FP_HELPER_CC(sxbr);
        break;
    case 0x4c: /* MXBR R1,R2 [RRE] */
        FP_HELPER(mxbr);
        break;
    case 0x4d: /* DXBR R1,R2 [RRE] */
        FP_HELPER(dxbr);
        break;
    case 0x65: /* LXR R1,R2 [RRE] */
        /* Extended (128-bit) register move: copy both halves of the
           FP register pair.  */
        tmp = load_freg(r2);
        store_freg(r1, tmp);
        tcg_temp_free_i64(tmp);
        tmp = load_freg(r2 + 2);
        store_freg(r1 + 2, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x74: /* LZER R1 [RRE] */
        FP_HELPER is not used here: load-zero helpers take only R1.
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzer(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x75: /* LZDR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzdr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x76: /* LZXR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzxr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x84: /* SFPC R1 [RRE] */
        /* Set the FP control register from R1.  */
        tmp32_1 = load_reg32(r1);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x94: /* CEFBR R1,R2 [RRE] */
    case 0x95: /* CDFBR R1,R2 [RRE] */
    case 0x96: /* CXFBR R1,R2 [RRE] */
        /* Convert from 32-bit fixed.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = load_reg32(r2);
        switch (op) {
        case 0x94:
            gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
            break;
        case 0x95:
            gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
            break;
        case 0x96:
            gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x98: /* CFEBR R1,R2 [RRE] */
    case 0x99: /* CFDBR R1,R2 [RRE] */
    case 0x9a: /* CFXBR R1,R2 [RRE] */
        /* Convert to 32-bit fixed; m3 is the rounding mode.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        switch (op) {
        case 0x98:
            gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        case 0x99:
            gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        case 0x9a:
            gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        default:
            tcg_abort();
        }
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xa4: /* CEGBR R1,R2 [RRE] */
    case 0xa5: /* CDGBR R1,R2 [RRE] */
        /* Convert from 64-bit fixed.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp = load_reg(r2);
        switch (op) {
        case 0xa4:
            gen_helper_cegbr(cpu_env, tmp32_1, tmp);
            break;
        case 0xa5:
            gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0xa6: /* CXGBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp = load_reg(r2);
        gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0xa8: /* CGEBR R1,R2 [RRE] */
        /* Convert to 64-bit fixed; m3 is the rounding mode.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xa9: /* CGDBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xaa: /* CGXBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    default:
        LOG_DISAS("illegal b3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }

#undef FP_HELPER_CC
#undef FP_HELPER
}
1926
1927 static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
1928 int r2)
1929 {
1930 TCGv_i64 tmp;
1931 TCGv_i32 tmp32_1;
1932
1933 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1934 switch (op) {
1935 case 0x17: /* LLGTR R1,R2 [RRE] */
1936 tmp32_1 = load_reg32(r2);
1937 tmp = tcg_temp_new_i64();
1938 tcg_gen_andi_i32(tmp32_1, tmp32_1, 0x7fffffffUL);
1939 tcg_gen_extu_i32_i64(tmp, tmp32_1);
1940 store_reg(r1, tmp);
1941 tcg_temp_free_i32(tmp32_1);
1942 tcg_temp_free_i64(tmp);
1943 break;
1944 case 0x0f: /* LRVGR R1,R2 [RRE] */
1945 tcg_gen_bswap64_i64(regs[r1], regs[r2]);
1946 break;
1947 case 0x1f: /* LRVR R1,R2 [RRE] */
1948 tmp32_1 = load_reg32(r2);
1949 tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
1950 store_reg32(r1, tmp32_1);
1951 tcg_temp_free_i32(tmp32_1);
1952 break;
1953 case 0x83: /* FLOGR R1,R2 [RRE] */
1954 tmp = load_reg(r2);
1955 tmp32_1 = tcg_const_i32(r1);
1956 gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
1957 set_cc_static(s);
1958 tcg_temp_free_i64(tmp);
1959 tcg_temp_free_i32(tmp32_1);
1960 break;
1961 default:
1962 LOG_DISAS("illegal b9 operation 0x%x\n", op);
1963 gen_illegal_opcode(s);
1964 break;
1965 }
1966 }
1967
/* Top-level legacy decoder: read the first opcode byte at s->pc and
   dispatch to the per-opcode disas_* routines, decoding the remaining
   fields of the 4- or 6-byte instruction as needed.  */
static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2;
    unsigned char opc;
    uint64_t insn;
    int op, r1, r2, r3, d2, x2, b2, r1b;

    opc = cpu_ldub_code(env, s->pc);
    LOG_DISAS("opc 0x%x\n", opc);

    switch (opc) {
    case 0xb2:
        insn = ld_code4(env, s->pc);
        op = (insn >> 16) & 0xff;
        disas_b2(env, s, op, insn);
        break;
    case 0xb3:
        insn = ld_code4(env, s->pc);
        op = (insn >> 16) & 0xff;
        r3 = (insn >> 12) & 0xf; /* aka m3 */
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        disas_b3(env, s, op, r3, r1, r2);
        break;
    case 0xb9:
        insn = ld_code4(env, s->pc);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        op = (insn >> 16) & 0xff;
        disas_b9(env, s, op, r1, r2);
        break;
    case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
        /* Compare Logical under Mask.  */
        insn = ld_code4(env, s->pc);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_clm(cc_op, cpu_env, tmp32_1, tmp32_2, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
        /* Store Characters under Mask.  */
        insn = ld_code4(env, s->pc);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stcm(cpu_env, tmp32_1, tmp32_2, tmp);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0xe3:
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        op = insn & 0xff;
        r1 = (insn >> 36) & 0xf;
        x2 = (insn >> 32) & 0xf;
        b2 = (insn >> 28) & 0xf;
        /* Reassemble the 20-bit displacement (12 low bits plus 8 high
           bits) and sign-extend it via the <<12 >>12 on int.  */
        d2 = ((int)((((insn >> 16) & 0xfff)
           | ((insn << 4) & 0xff000)) << 12)) >> 12;
        disas_e3(env, s, op, r1, x2, b2, d2 );
        break;
#ifndef CONFIG_USER_ONLY
    case 0xe5:
        /* Test Protection */
        check_privileged(s);
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        disas_e5(env, s, insn);
        break;
#endif
    case 0xeb:
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        op = insn & 0xff;
        r1 = (insn >> 36) & 0xf;
        r3 = (insn >> 32) & 0xf;
        b2 = (insn >> 28) & 0xf;
        /* Same signed 20-bit displacement reassembly as case 0xe3.  */
        d2 = ((int)((((insn >> 16) & 0xfff)
           | ((insn << 4) & 0xff000)) << 12)) >> 12;
        disas_eb(env, s, op, r1, r3, b2, d2);
        break;
    case 0xed:
        insn = ld_code6(env, s->pc);
        debug_insn(insn);
        op = insn & 0xff;
        r1 = (insn >> 36) & 0xf;
        x2 = (insn >> 32) & 0xf;
        b2 = (insn >> 28) & 0xf;
        /* 12-bit unsigned displacement only for the ED group.  */
        d2 = (short)((insn >> 16) & 0xfff);
        r1b = (insn >> 12) & 0xf;
        disas_ed(env, s, op, r1, x2, b2, d2, r1b);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
        gen_illegal_opcode(s);
        break;
    }
}
2073
2074 /* ====================================================================== */
2075 /* Define the insn format enumeration. */
2076 #define F0(N) FMT_##N,
2077 #define F1(N, X1) F0(N)
2078 #define F2(N, X1, X2) F0(N)
2079 #define F3(N, X1, X2, X3) F0(N)
2080 #define F4(N, X1, X2, X3, X4) F0(N)
2081 #define F5(N, X1, X2, X3, X4, X5) F0(N)
2082
2083 typedef enum {
2084 #include "insn-format.def"
2085 } DisasFormat;
2086
2087 #undef F0
2088 #undef F1
2089 #undef F2
2090 #undef F3
2091 #undef F4
2092 #undef F5
2093
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* "Original" field indices; each value doubles as a bit position in
   DisasFields.presentO (see have_field1).  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
2122
/* "Compact" storage slots: fields that never coexist in one insn
   format share the same slot in DisasFields.c[].  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
2153
/* The decoded fields of one instruction, stored compactly.  */
struct DisasFields {
    unsigned op:8;           /* opcode byte -- presumably the major
                                opcode; set by the decoder (not shown
                                in this chunk) */
    unsigned op2:8;          /* second opcode byte, where one exists --
                                TODO confirm against the decoder */
    unsigned presentC:16;    /* bitmap over compact slots c[] */
    unsigned int presentO;   /* bitmap over enum DisasFieldIndexO,
                                tested by have_field1() */
    int c[NUM_C_FIELD];      /* field values, indexed by DisasFieldIndexC */
};
2161
/* This is the way fields are to be accessed out of DisasFields.
   F names the field (r1, b2, d2, ...); the FLD_O_/FLD_C_ prefixes are
   pasted on automatically.  */
#define have_field(S, F) have_field1((S), FLD_O_##F)
#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
2165
2166 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
2167 {
2168 return (f->presentO >> c) & 1;
2169 }
2170
/* Fetch decoded field O out of its compact slot C.  The field must
   have been decoded for this insn (asserted).  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
2177
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;      /* starting bit position within the insn */
    unsigned int size:8;     /* field width in bits */
    unsigned int type:2;     /* extraction variant -- per the macros
                                below: 1 on immediates, 2 on 20-bit
                                long displacements, 0 otherwise.
                                TODO confirm semantics in the extractor */
    unsigned int indexC:6;   /* compact storage slot (DisasFieldIndexC) */
    enum DisasFieldIndexO indexO:8;  /* original field index */
} DisasField;

/* Per-format list of field descriptors, one slot per compact field.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
2190
/* Field-layout building blocks used by insn-format.def:
   R = 4-bit register, M = 4-bit mask, BD = base + 12-bit displacement,
   BXD adds an index register, BDL/BXDL use a 20-bit long displacement,
   I = immediate, L = length field.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* This time each F<n> entry expands to a DisasFormatInfo initializer,
   so format_info[] is indexed by the DisasFormat enum built above.  */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
2231
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    /* g_* flag that the corresponding TCGv aliases a global register --
       presumably so it must not be freed or clobbered; confirm against
       the in/out helper routines.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;    /* operand values */
    TCGv_i64 addr1;                  /* address of operand 1, when in storage --
                                        TODO confirm with prep routines */
} DisasOps;
2240
/* Return values from translate_one, indicating the state of the TB,
   i.e. what bookkeeping is still required to finish the TB.  */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
2258
/* Architectural facilities an insn may require; checked at decode time.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
2281
/* Static description of one instruction: how to decode it and the set
   of hooks used to translate it.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode bits used for table lookup */
    DisasFormat fmt:6;      /* instruction format; indexes format_info */
    DisasFacility fac:6;    /* facility required for this insn */

    const char *name;       /* mnemonic, for logging/debug */

    /* Optional per-insn hooks.  NOTE(review): presumed calling order is
       in1, in2, prep, op, wout, cout -- confirm in translate_one, which
       is outside this chunk.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* extra per-insn constant for shared helpers */
};
2298
2299 /* ====================================================================== */
2300 /* Miscelaneous helpers, used by several operations. */
2301
2302 static void help_l2_shift(DisasContext *s, DisasFields *f,
2303 DisasOps *o, int mask)
2304 {
2305 int b2 = get_field(f, b2);
2306 int d2 = get_field(f, d2);
2307
2308 if (b2 == 0) {
2309 o->in2 = tcg_const_i64(d2 & mask);
2310 } else {
2311 o->in2 = get_address(s, 0, b2, d2);
2312 tcg_gen_andi_i64(o->in2, o->in2, mask);
2313 }
2314 }
2315
2316 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
2317 {
2318 if (dest == s->next_pc) {
2319 return NO_EXIT;
2320 }
2321 if (use_goto_tb(s, dest)) {
2322 gen_update_cc_op(s);
2323 tcg_gen_goto_tb(0);
2324 tcg_gen_movi_i64(psw_addr, dest);
2325 tcg_gen_exit_tb((tcg_target_long)s->tb);
2326 return EXIT_GOTO_TB;
2327 } else {
2328 tcg_gen_movi_i64(psw_addr, dest);
2329 return EXIT_PC_UPDATED;
2330 }
2331 }
2332
/* Emit a conditional branch described by C.  The target is either
   relative immediate IMM (is_imm set) or the register value CDEST.
   Chooses between goto_tb chaining, a brcond diamond, or a movcond,
   depending on which exits are chainable.  Consumes *C.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;   /* halfword-scaled displacement */
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so a 64-bit movcond
               can select between the two destinations.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
2457
2458 /* ====================================================================== */
2459 /* The operations. These perform the bulk of the work for any insn,
2460 usually after the operands have been loaded and output initialized. */
2461
/* out = |in2| (absolute value, via helper).  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}
2467
/* out = in1 + in2.  CC is produced by the insn's cout hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2473
/* Add with carry: out = in1 + in2 + carry-in.  The incoming carry is
   taken as bit 1 of the current condition code (cc >> 1).  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    tcg_gen_shri_i64(cc, cc, 1);

    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
2490
/* out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2496
/* AND with an immediate that affects only a SIZE-bit field at SHIFT
   (both packed into insn->data).  Bits outside the field are forced
   to 1 in the mask so they pass through from in1 unchanged.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2513
2514 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
2515 {
2516 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
2517 if (!TCGV_IS_UNUSED_I64(o->in2)) {
2518 tcg_gen_mov_i64(psw_addr, o->in2);
2519 return EXIT_PC_UPDATED;
2520 } else {
2521 return NO_EXIT;
2522 }
2523 }
2524
/* Branch-and-save with a halfword-scaled immediate displacement:
   save the link info, then branch direct to pc + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
2530
/* BRANCH ON CONDITION: build a comparison from the M1 mask against
   the current CC, then branch either to the immediate displacement
   (if the i2 field is present) or to the address in in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
2541
/* BRANCH ON COUNT (32-bit): decrement the low half of r1 and branch
   if the 32-bit result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits; only the low 32 are written back and
       compared against zero.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
2565
/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch if the
   result is non-zero.  The register global is compared directly
   (g1 set), so help_branch must not modify it.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
2584
/* COMPARE LOGICAL (character): compare L1+1 bytes at addr1 with those
   at in2.  Power-of-two lengths are inlined as a pair of loads whose
   CC is computed later; other lengths go through the helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);   /* encoded length is L-1 */
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: the helper computes the CC itself.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
2618
/* COMPARE LOGICAL LONG EXTENDED: fully helper-based; the helper sets
   the CC, which we then mark as statically computed.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2630
/* COMPARE AND SWAP (32-bit): helper compares in1 with the word at in2
   and conditionally stores regs[r3]; CC comes from the helper.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
2639
/* COMPARE AND SWAP (64-bit): as op_cs, but on a doubleword.  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
2648
/* COMPARE DOUBLE AND SWAP: the even/odd pair r3:r3+1 is packed into
   one 64-bit value so the 64-bit csg helper can be reused.  The
   64-bit result in out is presumably split back into a register pair
   by the wout hook -- not visible in this chunk.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}
2660
/* COMPARE DOUBLE AND SWAP (128-bit): entirely helper-based; the
   helper accesses the r1 and r3 register pairs itself.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
2671
/* CONVERT TO DECIMAL: convert the low 32 bits of in1 via helper and
   store the 8-byte packed-decimal result at address in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2683
2684 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): hypervisor call.  The function code comes
   from the low 12 bits of the d2 field.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
2698 #endif
2699
/* 32-bit signed divide; quotient/remainder pair returned via out2 and
   the helper's low-128 side channel.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2706
/* 32-bit unsigned divide; see op_divs32 for the result convention.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2713
/* 64-bit signed divide; see op_divs32 for the result convention.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2720
/* 64-bit unsigned divide with a 128-bit dividend: unlike the other
   divides, the dividend is passed in out/out2.  NOTE(review): those
   are presumably preloaded from the r1 register pair by the prep/in
   hooks for this insn -- confirm against the insn table.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2727
/* EXTRACT FPC: read the floating-point control register into out.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2733
/* EXECUTE: run the single instruction at address in2 with its second
   byte OR-ed with bits from in1, via helper.  PSW address and CC must
   be in sync with the cpu state before the helper runs.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2759
2760 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2761 {
2762 int m3 = get_field(s->fields, m3);
2763 int pos, len, base = s->insn->data;
2764 TCGv_i64 tmp = tcg_temp_new_i64();
2765 uint64_t ccm;
2766
2767 switch (m3) {
2768 case 0xf:
2769 /* Effectively a 32-bit load. */
2770 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2771 len = 32;
2772 goto one_insert;
2773
2774 case 0xc:
2775 case 0x6:
2776 case 0x3:
2777 /* Effectively a 16-bit load. */
2778 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2779 len = 16;
2780 goto one_insert;
2781
2782 case 0x8:
2783 case 0x4:
2784 case 0x2:
2785 case 0x1:
2786 /* Effectively an 8-bit load. */
2787 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2788 len = 8;
2789 goto one_insert;
2790
2791 one_insert:
2792 pos = base + ctz32(m3) * 8;
2793 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2794 ccm = ((1ull << len) - 1) << pos;
2795 break;
2796
2797 default:
2798 /* This is going to be a sequence of loads and inserts. */
2799 pos = base + 32 - 8;
2800 ccm = 0;
2801 while (m3) {
2802 if (m3 & 0x8) {
2803 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2804 tcg_gen_addi_i64(o->in2, o->in2, 1);
2805 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2806 ccm |= 0xff << pos;
2807 }
2808 m3 = (m3 << 1) & 0xf;
2809 pos -= 8;
2810 }
2811 break;
2812 }
2813
2814 tcg_gen_movi_i64(tmp, ccm);
2815 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2816 tcg_temp_free_i64(tmp);
2817 return NO_EXIT;
2818 }
2819
/* Insert in2 into in1 as a SIZE-bit field at bit SHIFT (both packed
   into insn->data), producing out.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2827
/* Load a sign-extended byte from address in2.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2833
/* Load a zero-extended byte from address in2.  */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2839
/* Load a sign-extended halfword from address in2.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2845
/* Load a zero-extended halfword from address in2.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2851
/* Load a sign-extended word from address in2.  */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2857
/* Load a zero-extended word from address in2.  */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2863
/* Load a doubleword from address in2.  */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2869
2870 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (privileged): load control registers r1..r3 from the
   storage at in2, via helper.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2882
/* LOAD REAL ADDRESS (privileged): translate the virtual address in2;
   the helper produces both the result and the CC.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2891
/* LOAD PSW (privileged): load a short (64-bit) PSW from address in2
   and install it.  Control does not return to the current TB.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2910 #endif
2911
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from storage at
   in2, via helper.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2922
/* LOAD MULTIPLE (32-bit): load words into the low halves of registers
   r1 through r3, wrapping from 15 back to 0.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        /* Advance the address and wrap the register number.  */
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2944
/* LOAD MULTIPLE HIGH: as op_lm32, but into the high halves of the
   registers.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2966
/* LOAD MULTIPLE (64-bit): load doublewords directly into registers
   r1 through r3, wrapping from 15 back to 0.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2985
/* Move: transfer ownership of in2 to out (no copy emitted); the
   former in2 temp must not be freed twice, hence the UNUSED mark.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2994
/* Move a pair: transfer ownership of in1/in2 to out/out2 without
   emitting any copies (see op_mov2).  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
3006
/* MOVE (character): copy L1+1 bytes from in2 to addr1, via helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3015
/* MOVE LONG: helper-based; operand addresses/lengths live in the r1
   and r2 register pairs, which the helper updates.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
3027
/* MOVE LONG EXTENDED: helper-based; CC comes from the helper.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
3039
3040 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY (privileged): the SS-d format puts the register
   number in the l1 field; regs[r1] carries the true length.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3050
/* MOVE TO SECONDARY (privileged): see op_mvcp for the l1 oddity.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3060 #endif
3061
/* out = in1 * in2 (low 64 bits).  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3067
/* 64x64 -> 128-bit multiply: high half in out, low half retrieved
   from the helper side channel into out2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3074
/* out = -|in2| (negative absolute value, via helper).  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}
3080
/* AND (character): storage-to-storage AND of L1+1 bytes; the helper
   computes the CC.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3090
/* out = -in2.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}
3096
/* OR (character): storage-to-storage OR of L1+1 bytes; the helper
   computes the CC.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3106
/* out = in1 | in2.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3112
/* OR with an immediate that affects only a SIZE-bit field at SHIFT
   (packed into insn->data); CC from the manipulated bits only.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3128
3129 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3130 {
3131 TCGv_i32 t1 = tcg_temp_new_i32();
3132 TCGv_i32 t2 = tcg_temp_new_i32();
3133 TCGv_i32 to = tcg_temp_new_i32();
3134 tcg_gen_trunc_i64_i32(t1, o->in1);
3135 tcg_gen_trunc_i64_i32(t2, o->in2);
3136 tcg_gen_rotl_i32(to, t1, t2);
3137 tcg_gen_extu_i32_i64(o->out, to);
3138 tcg_temp_free_i32(t1);
3139 tcg_temp_free_i32(t2);
3140 tcg_temp_free_i32(to);
3141 return NO_EXIT;
3142 }
3143
/* ROTATE LEFT (64-bit): out = in1 rotated left by in2.  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3149
3150 #ifndef CONFIG_USER_ONLY
/* SIGNAL PROCESSOR (privileged): helper-based; CC from the helper.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3160 #endif
3161
/* SHIFT LEFT ARITHMETIC: shift in1 left by in2 while keeping the sign
   bit (bit insn->data: 31 for the 32-bit form, 63 for 64-bit).  CC is
   computed from the original operands by the SLA cc op.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
3175
/* SHIFT LEFT LOGICAL: out = in1 << in2.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3181
/* SHIFT RIGHT ARITHMETIC: out = in1 >> in2, sign-filling.  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3187
/* SHIFT RIGHT LOGICAL: out = in1 >> in2, zero-filling.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3193
3194 #ifndef CONFIG_USER_ONLY
/* SET SYSTEM MASK (privileged): replace the top byte of the PSW mask
   with the low byte of in2.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3201
/* STORE CONTROL (privileged): store control registers r1..r3 to
   storage at in2, via helper.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3213
/* STORE THEN AND/OR SYSTEM MASK (privileged): store the current
   system-mask byte at addr1, then AND (opcode 0xac) or OR the
   immediate into the top byte of the PSW mask.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3237 #endif
3238
/* Store the low byte of in1 at address in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3244
/* Store the low halfword of in1 at address in2.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3250
/* Store the low word of in1 at address in2.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3256
/* Store the doubleword in1 at address in2.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3262
/* STORE ACCESS MULTIPLE: store access registers r1..r3 to storage at
   in2, via helper.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3273
/* STORE MULTIPLE: store registers r1 through r3 (wrapping 15 -> 0) at
   consecutive addresses from in2; insn->data selects 4- or 8-byte
   element size.  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3297
/* STORE MULTIPLE HIGH: store the high halves of registers r1..r3.
   The value is shifted left by 32 and stored with st32, which writes
   the (now high) word -- i.e. the original bits 0-31.  */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
3321
/* out = in1 - in2.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3327
/* Subtract with borrow: out = in1 + ~in2 + borrow-in, where the
   incoming borrow is bit 1 of the current condition code (cc >> 1).
   in2 is complemented in place, hence the g_in2 assert.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3345
/* SUPERVISOR CALL: record the SVC number and instruction length in
   the cpu state, then raise the exception.  PSW address and CC must
   be made current first, since the TB is abandoned.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3364
/* TRANSLATE: replace L1+1 bytes at addr1 using the 256-byte table at
   in2, via helper.  NOTE(review): TR does not set the condition code
   architecturally, yet set_cc_static is called here even though the
   helper (called with cpu_env, not cc_op) does not write cc_op --
   verify this against the helper definition and the POP.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3374
/* UNPACK: convert packed decimal at in2 to zoned at addr1, helper-
   based; no CC change.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3383
/* EXCLUSIVE OR (character): storage-to-storage XOR of L1+1 bytes; the
   helper computes the CC.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3393
/* out = in1 ^ in2.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3399
/* XOR IMMEDIATE against a sub-field of the register.  insn->data packs
   the field position: low byte = shift, high bits = field size. */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 (the immediate) is a private temp; shift it into position. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3415
3416 /* ====================================================================== */
3417 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3418 the original inputs), update the various cc data structures in order to
3419 be able to compute the new condition code. */
3420
/* cc after 32-bit LOAD POSITIVE. */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

/* cc after 64-bit LOAD POSITIVE. */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* cc after signed 32-bit add: needs both inputs and the result. */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

/* cc after signed 64-bit add. */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* cc after unsigned (logical) 32-bit add. */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

/* cc after unsigned (logical) 64-bit add. */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* cc after 32-bit add-with-carry. */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

/* cc after 64-bit add-with-carry. */
static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* cc from a signed 32-bit compare of in1 vs in2. */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

/* cc from a signed 64-bit compare. */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* cc from an unsigned 32-bit compare. */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

/* cc from an unsigned 64-bit compare. */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* cc after 32-bit LOAD NEGATIVE. */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

/* cc after 64-bit LOAD NEGATIVE. */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* cc after 32-bit LOAD COMPLEMENT. */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

/* cc after 64-bit LOAD COMPLEMENT. */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Zero/nonzero cc from the low 32 bits of the result only. */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

/* Zero/nonzero cc from the full 64-bit result. */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Signed compare-against-zero cc, 32-bit result. */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

/* Signed compare-against-zero cc, 64-bit result. */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* cc after signed 32-bit subtract. */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

/* cc after signed 64-bit subtract. */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* cc after unsigned (logical) 32-bit subtract. */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

/* cc after unsigned (logical) 64-bit subtract. */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* cc after 32-bit subtract-with-borrow. */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

/* cc after 64-bit subtract-with-borrow. */
static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* cc for TEST UNDER MASK, 32-bit operand. */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

/* cc for TEST UNDER MASK, 64-bit operand. */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3561
3562 /* ====================================================================== */
3563 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3564 with the TCG register to which we will write. Used in combination with
3565 the "wout" generators, in some cases we need a new temporary, and in
3566 some cases we can write to a TCG global. */
3567
/* Allocate a fresh temporary as the output; freed by translate_one. */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

/* Allocate a fresh temporary pair for double-width outputs. */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}

/* Write directly into the r1 global; g_out prevents it being freed. */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}

/* Write directly into the even/odd register pair r1:r1+1. */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}
3593
3594 /* ====================================================================== */
3595 /* The "Write OUTput" generators. These generally perform some non-trivial
3596 copy of data to TCG globals, or to main memory. The trivial cases are
3597 generally handled by having a "prep" generator install the TCG global
3598 as the destination of the operation. */
3599
/* Store the full 64-bit result into r1. */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

/* Store only the low 8 bits of the result into r1, preserving the rest. */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

/* Store the low 32 bits of the result into r1. */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}

/* Store a 32-bit pair into the even/odd registers r1 and r1+1. */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

/* Split a 64-bit result across the pair: low half to r1+1, high to r1.
   Note this clobbers o->out (shifts it right by 32 in place). */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

/* Store a short (32-bit) float result into f1. */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}

/* Store a long (64-bit) float result into f1. */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}

/* Store an extended (128-bit) float result into the pair f1:f1+2. */
static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg((f1 + 2) & 15, o->out2);
}

/* Conditional 32-bit store: skip when r1 == r2 (load-and-test forms). */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

/* Conditional short-float store: skip when r1 == r2. */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}

/* Store 1 byte of the result to the first-operand address. */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

/* Store 2 bytes to the first-operand address. */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

/* Store 4 bytes to the first-operand address. */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

/* Store 8 bytes to the first-operand address. */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}

/* Store 4 bytes to the second-operand address (held in in2). */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3688
3689 /* ====================================================================== */
3690 /* The "INput 1" generators. These load the first operand to an insn. */
3691
/* in1 = copy of r1. */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

/* in1 = the r1 global itself (no copy); g_in1 prevents freeing. */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

/* in1 = r1 sign-extended from 32 bits. */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

/* in1 = r1 zero-extended from 32 bits. */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

/* in1 = copy of the odd register of the r1 pair. */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* in1 = odd register of the r1 pair, sign-extended from 32 bits. */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* in1 = odd register of the r1 pair, zero-extended from 32 bits. */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}
3737
3738 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3739 {
3740 /* ??? Specification exception: r1 must be even. */
3741 int r1 = get_field(f, r1);
3742 o->in1 = tcg_temp_new_i64();
3743 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3744 }
3745
/* in1 = copy of r2. */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* in1 = copy of r3. */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

/* in1 = the r3 global itself (no copy). */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

/* in1 = r3 sign-extended from 32 bits. */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

/* in1 = r3 zero-extended from 32 bits. */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

/* in1 = short (32-bit) float operand from f1. */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

/* in1 = the f1 float global itself (no copy). */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}

/* Compute the first-operand effective address (base b1 + disp d1, no
   index) into addr1 without loading from it. */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

/* in1 = 1 byte, zero-extended, from the first-operand address. */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

/* in1 = 2 bytes, sign-extended, from the first-operand address. */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

/* in1 = 2 bytes, zero-extended, from the first-operand address. */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

/* in1 = 4 bytes, sign-extended, from the first-operand address. */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

/* in1 = 4 bytes, zero-extended, from the first-operand address. */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

/* in1 = 8 bytes from the first-operand address. */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3831
3832 /* ====================================================================== */
3833 /* The "INput 2" generators. These load the second operand to an insn. */
3834
/* in2 = copy of r2. */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}

/* in2 = the r2 global itself (no copy); g_in2 prevents freeing. */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

/* in2 = copy of r2, but left unset (NULL) when r2 == 0 -- for insns
   where register 0 means "no operand". */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}

/* in2 = r2 sign-extended from 8 bits. */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = r2 zero-extended from 8 bits. */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = r2 sign-extended from 16 bits. */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = r2 zero-extended from 16 bits. */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = copy of r3. */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

/* in2 = r2 sign-extended from 32 bits. */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = r2 zero-extended from 32 bits. */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

/* in2 = short (32-bit) float operand from f2. */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}

/* in2 = the f2 float global itself (no copy). */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Extended (128-bit) float operand: both halves of the f2:f2+2 pair,
   placed in in1 (high) and in2 (low), as globals. */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f2 = get_field(f, r2);
    o->in1 = fregs[f2];
    o->in2 = fregs[(f2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}

/* in2 = second-operand effective address (index x2 if present, base b2,
   displacement d2); no memory access. */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* in2 = pc-relative address: current pc + signed i2 halfword offset. */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

/* in2 = shift amount for 32-bit shifts (masked to 0..31). */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

/* in2 = shift amount for 64-bit shifts (masked to 0..63). */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}

/* in2 = 1 byte, zero-extended, loaded from the a2 address. */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

/* in2 = 2 bytes, sign-extended, loaded from the a2 address. */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

/* in2 = 4 bytes, sign-extended, loaded from the a2 address. */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* in2 = 4 bytes, zero-extended, loaded from the a2 address. */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* in2 = 8 bytes loaded from the a2 address. */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* in2 = 2 bytes, zero-extended, loaded pc-relative. */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* in2 = 4 bytes, sign-extended, loaded pc-relative. */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* in2 = 4 bytes, zero-extended, loaded pc-relative. */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* in2 = 8 bytes loaded pc-relative. */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* in2 = the i2 immediate as extracted (already signed if the field
   definition says so). */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

/* in2 = low 8 bits of the i2 immediate, zero-extended. */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

/* in2 = low 16 bits of the i2 immediate, zero-extended. */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

/* in2 = low 32 bits of the i2 immediate, zero-extended. */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

/* in2 = unsigned 16-bit i2 shifted left by insn->data (e.g. IILH/OILH
   style field positioning). */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

/* in2 = unsigned 32-bit i2 shifted left by insn->data. */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
4020
4021 /* ====================================================================== */
4022
4023 /* Find opc within the table of insns. This is formulated as a switch
4024 statement so that (1) we get compile-time notice of cut-paste errors
4025 for duplicated opcodes, and (2) the compiler generates the binary
4026 search tree, rather than us having to post-process the table. */
4027
/* C() is the common case of D() with no extra per-insn data. */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: build an enum of insn_<NAME>
   indices, used below to index insn_info[]. */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
/* Second expansion: build the DisasInsn descriptor for each insn,
   wiring up the in1/in2/prep/wout/cout/op helper functions by name. */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
    },

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
4063
#undef D
/* Third expansion: one switch case per opcode, returning the matching
   descriptor.  Duplicate opcodes become compile-time errors, and the
   compiler emits the binary search for us. */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a 16-bit (major << 8 | minor) opcode to its DisasInsn, or NULL
   if the opcode is not in the table. */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4079
4080 /* Extract a field from the insn. The INSN should be left-aligned in
4081 the uint64_t so that we can more easily utilize the big-bit-endian
4082 definitions we extract from the Principals of Operation. */
4083
4084 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4085 {
4086 uint32_t r, m;
4087
4088 if (f->size == 0) {
4089 return;
4090 }
4091
4092 /* Zero extract the field from the insn. */
4093 r = (insn << f->beg) >> (64 - f->size);
4094
4095 /* Sign-extend, or un-swap the field as necessary. */
4096 switch (f->type) {
4097 case 0: /* unsigned */
4098 break;
4099 case 1: /* signed */
4100 assert(f->size <= 32);
4101 m = 1u << (f->size - 1);
4102 r = (r ^ m) - m;
4103 break;
4104 case 2: /* dl+dh split, signed 20 bit. */
4105 r = ((int8_t)r << 12) | (r >> 8);
4106 break;
4107 default:
4108 abort();
4109 }
4110
4111 /* Validate that the "compressed" encoding we selected above is valid.
4112 I.e. we havn't make two different original fields overlap. */
4113 assert(((o->presentC >> f->indexC) & 1) == 0);
4114 o->presentC |= 1 << f->indexC;
4115 o->presentO |= 1 << f->indexO;
4116
4117 o->c[f->indexC] = r;
4118 }
4119
4120 /* Lookup the insn at the current PC, extracting the operands into O and
4121 returning the info struct for the insn. Returns NULL for invalid insn. */
4122
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for invalid insn.
   Also sets s->next_pc from the instruction length. */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first halfword's top byte determines the instruction length. */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full instruction in the 64-bit word. */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte. */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte. */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* SS-format insns have no secondary opcode. */
        op2 = 0;
        break;
    default:
        /* Default: secondary opcode at bit 40 (sixth byte). */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4208
/* Translate one instruction at s->pc: decode it, run the table-driven
   in1/in2/prep/op/wout/cout helper pipeline, free any temporaries the
   helpers created, and advance s->pc.  Falls back to the legacy
   interpreter for opcodes not yet converted to the table. */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter.  This includes ILLOPC.  */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy DISAS_* result onto the new ExitStatus codes. */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the strutures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    /* g_* flags mark operands that alias TCG globals and must not be
       freed; unused slots stay TCGV_UNUSED so the cleanup below can tell
       which temporaries the helpers actually allocated. */
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
4293
/* Translate a basic block starting at tb->pc into TCG ops.  When
   SEARCH_PC is set, also record per-insn metadata (pc, cc_op, icount)
   so the state can be restored at any instruction boundary. */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Translation must stop at the page boundary (computed below). */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record restore info for this insn, zero-filling any gap
               left by ops emitted since the previous insn. */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this pc ends the TB before the insn, so the
           debug exception is raised with a stale-but-correct pc. */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The TB epilogue was already emitted by the insn itself. */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the restore info past the last insn. */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4432
/* Public entry point: translate a TB without per-insn restore info. */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
4437
/* Public entry point: translate a TB recording per-insn restore info,
   for use with restore_state_to_opc. */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4442
4443 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4444 {
4445 int cc_op;
4446 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4447 cc_op = gen_opc_cc_op[pc_pos];
4448 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4449 env->cc_op = cc_op;
4450 }
4451 }