]> git.proxmox.com Git - qemu.git/blob - target-s390x/translate.c
target-s390: Convert LLGT
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
/* Per-translation state threaded through the disassembler. */
struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;       /* decode-table entry of the current insn */
    DisasFields *fields;         /* decoded operand fields of the current insn */
    uint64_t pc, next_pc;        /* address of current insn and its successor */
    enum cc_op cc_op;            /* compile-time tracked cc computation */
    bool singlestep_enabled;
    int is_jmp;                  /* end-of-TB disposition (e.g. DISAS_EXCP) */
};
60
/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;   /* true: operands live in u.s64, else in u.s32 */
    bool g1;      /* operand a is a TCG global; free_compare must not free it */
    bool g2;      /* operand b is a TCG global; free_compare must not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
72
73 #define DISAS_EXCP 4
74
75 static void gen_op_calc_cc(DisasContext *s);
76
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
80 #endif
81
/* Trace the raw instruction word when verbose disas logging is enabled. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
86
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
99 {
100 int i;
101
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
108 }
109
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
116 }
117 }
118
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
125 }
126 }
127
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
135 }
136 }
137 #endif
138
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
143 }
144 #endif
145
146 cpu_fprintf(f, "\n");
147 }
148
149 static TCGv_i64 psw_addr;
150 static TCGv_i64 psw_mask;
151
152 static TCGv_i32 cc_op;
153 static TCGv_i64 cc_src;
154 static TCGv_i64 cc_dst;
155 static TCGv_i64 cc_vr;
156
157 static char cpu_reg_names[32][4];
158 static TCGv_i64 regs[16];
159 static TCGv_i64 fregs[16];
160
161 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
/* One-time TCG setup: create the global TCG values that mirror the fields
   of CPUS390XState (PSW, cc state, general and floating point registers)
   and register the helper functions. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* Names live in cpu_reg_names: slots 0-15 are "r0".."r15". */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* Slots 16-31 are "f0".."f15". */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
202
203 static inline TCGv_i64 load_reg(int reg)
204 {
205 TCGv_i64 r = tcg_temp_new_i64();
206 tcg_gen_mov_i64(r, regs[reg]);
207 return r;
208 }
209
210 static inline TCGv_i64 load_freg(int reg)
211 {
212 TCGv_i64 r = tcg_temp_new_i64();
213 tcg_gen_mov_i64(r, fregs[reg]);
214 return r;
215 }
216
/* Return a fresh i32 temporary with the short (high 32 bits) part of FPR REG.
   On 32-bit hosts the high half can be copied directly; on 64-bit hosts the
   i32 temp aliases an i64 register, so shift the value down in place. */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}
227
228 static inline TCGv_i64 load_freg32_i64(int reg)
229 {
230 TCGv_i64 r = tcg_temp_new_i64();
231 tcg_gen_shri_i64(r, fregs[reg], 32);
232 return r;
233 }
234
235 static inline TCGv_i32 load_reg32(int reg)
236 {
237 TCGv_i32 r = tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r, regs[reg]);
239 return r;
240 }
241
242 static inline TCGv_i64 load_reg32_i64(int reg)
243 {
244 TCGv_i64 r = tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r, regs[reg]);
246 return r;
247 }
248
/* Store V into all 64 bits of general register REG. */
static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
253
/* Store V into all 64 bits of floating point register REG. */
static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
258
/* Store the i32 V into the low half of register REG.
   On 64-bit hosts the i32 aliases an i64 value, so use a deposit into
   bits [0,32) instead of a plain move. */
static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}
269
/* Store the low 32 bits of the i64 V into the low half of register REG. */
static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
275
/* Store the low 32 bits of the i64 V into the HIGH half of register REG,
   leaving the low half untouched. */
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
280
/* Store the i32 V into the short (high 32 bits) part of FPR REG.
   On 64-bit hosts the i32 aliases an i64 value, so deposit into
   bits [32,64) instead of a plain move. */
static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}
291
/* Store the low 32 bits of the i64 V into the short (high) part of FPR REG. */
static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
296
/* Fetch the low 64 bits of a 128-bit helper result, which helpers leave
   in env->retxl, into DEST. */
static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
301
/* Synchronize the architectural PSW address with the translation pc. */
static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
307
/* Before emitting code that may fault (system mode only), make the PSW
   address and condition code state in env consistent, so the fault
   handler sees correct architectural state. */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}
315
/* Fetch a 2-byte instruction word at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
320
/* Fetch a 4-byte instruction word at PC, zero-extended to 64 bits.
   The intermediate uint32_t cast avoids sign-extension of the i32 load. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
325
/* Fetch a 6-byte instruction at PC: high 2 bytes in bits [32,48),
   remaining 4 bytes in the low half. */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
330
331 static inline int get_mem_index(DisasContext *s)
332 {
333 switch (s->tb->flags & FLAG_MASK_ASC) {
334 case PSW_ASC_PRIMARY >> 32:
335 return 0;
336 case PSW_ASC_SECONDARY >> 32:
337 return 1;
338 case PSW_ASC_HOME >> 32:
339 return 2;
340 default:
341 tcg_abort();
342 break;
343 }
344 }
345
346 static void gen_exception(int excp)
347 {
348 TCGv_i32 tmp = tcg_const_i32(excp);
349 gen_helper_exception(cpu_env, tmp);
350 tcg_temp_free_i32(tmp);
351 }
352
/* Raise a program exception with interruption code CODE.
   Stores the code and instruction length into env, advances the PSW past
   the faulting instruction, materializes the cc, triggers EXCP_PGM, and
   marks the TB as ended.  The ordering of these steps matters: the
   exception helper reads the state stored here. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length, derived from the pc span of this insn. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    gen_op_calc_cc(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);

    /* End TB here. */
    s->is_jmp = DISAS_EXCP;
}
379
/* Raise the program exception used for unimplemented/illegal opcodes. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}
384
/* Emit a privileged-operation exception if the TB was translated in
   problem state (PSW problem-state bit cached in tb->flags). */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
391
/* Compute the effective address base(b2) + index(x2) + displacement(d2)
   into a fresh temporary; x2 == 0 / b2 == 0 mean "no register".
   In 31-bit mode both the displacement and the final sum are masked
   to 31 bits. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        /* Neither index nor base: the address is just the displacement. */
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
429
/* Record a compile-time constant condition code VAL (0..3); no TCG code
   is emitted until the cc is actually needed. */
static void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}
434
/* Defer a one-operand (dst only) cc computation OP; unused cc fields are
   discarded so TCG can drop dead stores. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}
442
/* Same as gen_op_update1_cc_i64 but with a 32-bit operand, zero-extended
   into the 64-bit cc_dst global. */
static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}
450
/* Defer a two-operand (src, dst) cc computation OP. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}
459
/* 32-bit variant of gen_op_update2_cc_i64; operands are zero-extended. */
static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}
468
/* Defer a three-operand (src, dst, result) cc computation OP, as used by
   the add/subtract with carry/borrow operations. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
477
/* Set cc from a 32-bit logical (zero / not-zero) result. */
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}
482
/* Set cc from a 64-bit logical (zero / not-zero) result. */
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
487
/* Defer a 32-bit comparison of v1 against v2 under cc op COND. */
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}
493
/* Defer a 64-bit comparison of v1 against v2 under cc op COND. */
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}
499
/* Signed 32-bit compare. */
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}
504
/* Unsigned (logical) 32-bit compare. */
static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}
509
510 static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
511 {
512 /* XXX optimize for the constant? put it in s? */
513 TCGv_i32 tmp = tcg_const_i32(v2);
514 cmp_32(s, v1, tmp, CC_OP_LTGT_32);
515 tcg_temp_free_i32(tmp);
516 }
517
518 static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
519 {
520 TCGv_i32 tmp = tcg_const_i32(v2);
521 cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
522 tcg_temp_free_i32(tmp);
523 }
524
/* Signed 64-bit compare. */
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}
529
/* Unsigned (logical) 64-bit compare. */
static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}
534
535 static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
536 {
537 TCGv_i64 tmp = tcg_const_i64(v2);
538 cmp_s64(s, v1, tmp);
539 tcg_temp_free_i64(tmp);
540 }
541
542 static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
543 {
544 TCGv_i64 tmp = tcg_const_i64(v2);
545 cmp_u64(s, v1, tmp);
546 tcg_temp_free_i64(tmp);
547 }
548
/* Set cc from a signed 32-bit result compared against zero. */
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}
553
/* Set cc from a signed 64-bit result compared against zero. */
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}
558
/* Defer a 32-bit float comparison: v1 (raw f32 bits, zero-extended) vs
   v2 (raw f32 bits already in an i64), evaluated later as CC_OP_LTGT_F32. */
static void set_cc_cmp_f32_i64(DisasContext *s, TCGv_i32 v1, TCGv_i64 v2)
{
    tcg_gen_extu_i32_i64(cc_src, v1);
    tcg_gen_mov_i64(cc_dst, v2);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_LTGT_F32;
}
566
/* Set cc from a 32-bit float result via the NZ_F32 cc op. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i32 v1)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ_F32, v1);
}
571
572 /* CC value is in env->cc_op */
/* CC value is in env->cc_op */
/* Mark the cc as already computed into the cc_op global; the deferred
   operand globals become dead and are discarded. */
static inline void set_cc_static(DisasContext *s)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}
580
581 static inline void gen_op_set_cc_op(DisasContext *s)
582 {
583 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
584 tcg_gen_movi_i32(cc_op, s->cc_op);
585 }
586 }
587
/* Flush any compile-time cc_op tracking to the cc_op global before
   leaving translated code. */
static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
592
/* calculates cc into cc_op */
/* Dispatch on how many operands the deferred cc computation uses and call
   the calc_cc helper accordingly; constant and static cc ops are handled
   inline.  Afterwards the cc is live in the cc_op global. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_LTGT_F32:
    case CC_OP_LTGT_F64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        /* Unreachable: every cc op must be listed above. */
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
667
668 static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
669 {
670 debug_insn(insn);
671
672 *r1 = (insn >> 4) & 0xf;
673 *r2 = insn & 0xf;
674 }
675
676 static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
677 int *x2, int *b2, int *d2)
678 {
679 debug_insn(insn);
680
681 *r1 = (insn >> 20) & 0xf;
682 *x2 = (insn >> 16) & 0xf;
683 *b2 = (insn >> 12) & 0xf;
684 *d2 = insn & 0xfff;
685
686 return get_address(s, *x2, *b2, *d2);
687 }
688
689 static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
690 int *b2, int *d2)
691 {
692 debug_insn(insn);
693
694 *r1 = (insn >> 20) & 0xf;
695 /* aka m3 */
696 *r3 = (insn >> 16) & 0xf;
697 *b2 = (insn >> 12) & 0xf;
698 *d2 = insn & 0xfff;
699 }
700
701 static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
702 int *b1, int *d1)
703 {
704 debug_insn(insn);
705
706 *i2 = (insn >> 16) & 0xff;
707 *b1 = (insn >> 12) & 0xf;
708 *d1 = insn & 0xfff;
709
710 return get_address(s, 0, *b1, *d1);
711 }
712
713 static int use_goto_tb(DisasContext *s, uint64_t dest)
714 {
715 /* NOTE: we handle the case where the TB spans two pages here */
716 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
717 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
718 && !s->singlestep_enabled
719 && !(s->tb->cflags & CF_LAST_IO));
720 }
721
/* Emit a jump to PC through TB-chaining slot TB_NUM when possible,
   otherwise a plain exit.  Note the required TCG ordering: goto_tb must
   come before the psw update, and the exit value encodes (tb | slot)
   for later patching. */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}
736
/* Statistics hook: count a branch that could not be inlined for CC_OP.
   Compiles to nothing unless DEBUG_INLINE_BRANCHES is defined. */
static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
743
/* Statistics hook: count a branch that was successfully inlined for CC_OP.
   Compiles to nothing unless DEBUG_INLINE_BRANCHES is defined. */
static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
750
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.
   Indexed by the 4-bit branch mask; entries with the CC=3 bit set are
   TCG_COND_NEVER except for the all-ones mask. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
764
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible, so the low two mask
   bits are don't-cares. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
777
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the cc op is
   known at translation time the comparison is synthesized inline;
   otherwise the cc is materialized and compared against the mask. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial masks: branch always / branch never.  The operands are set
       to the cc_op global merely so free_compare has valid fields. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* Test-under-mask: only all-zero / not-all-zero are inlinable. */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The cc value is live in the cc_op global; synthesize the best
           comparison for each mask pattern. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
985
986 static void free_compare(DisasCompare *c)
987 {
988 if (!c->g1) {
989 if (c->is_64) {
990 tcg_temp_free_i64(c->u.s64.a);
991 } else {
992 tcg_temp_free_i32(c->u.s32.a);
993 }
994 }
995 if (!c->g2) {
996 if (c->is_64) {
997 tcg_temp_free_i64(c->u.s64.b);
998 } else {
999 tcg_temp_free_i32(c->u.s32.b);
1000 }
1001 }
1002 }
1003
/* Translate the 0xed opcode group (RXE/RXF floating point memory ops).
   OP is the low opcode byte; R1/X2/B2/D2 are the decoded fields, and R1B
   carries the extra register of RXF-format insns. */
static void disas_ed(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int x2, int b2, int d2, int r1b)
{
    TCGv_i32 tmp_r1, tmp32;
    TCGv_i64 addr, tmp;
    addr = get_address(s, x2, b2, d2);
    tmp_r1 = tcg_const_i32(r1);
    switch (op) {
    case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_ldeb(cpu_env, tmp_r1, addr);
        break;
    case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_lxdb(cpu_env, tmp_r1, addr);
        break;
    case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
        /* Compare the f32 in FPR r1 against the loaded operand; the cc
           computation is deferred via CC_OP_LTGT_F32. */
        tmp = tcg_temp_new_i64();
        tmp32 = load_freg32(r1);
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        set_cc_cmp_f32_i64(s, tmp32, tmp);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
        break;
    case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_aeb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);

        /* cc is set from the result now stored in FPR r1. */
        tmp32 = load_freg32(r1);
        gen_set_cc_nz_f32(s, tmp32);
        tcg_temp_free_i32(tmp32);
        break;
    case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_seb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);

        tmp32 = load_freg32(r1);
        gen_set_cc_nz_f32(s, tmp32);
        tcg_temp_free_i32(tmp32);
        break;
    case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_deb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
        break;
    case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tceb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tcdb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tcxb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_meeb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
        break;
    case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_cdb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_adb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_sdb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_mdb(cpu_env, tmp_r1, addr);
        break;
    case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_ddb(cpu_env, tmp_r1, addr);
        break;
    case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
        /* for RXF insns, r1 is R3 and r1b is R1 */
        tmp32 = tcg_const_i32(r1b);
        potential_page_fault(s);
        gen_helper_madb(cpu_env, tmp32, addr, tmp_r1);
        tcg_temp_free_i32(tmp32);
        break;
    default:
        LOG_DISAS("illegal ed operation 0x%x\n", op);
        gen_illegal_opcode(s);
        /* NB: early return — tmp_r1 and addr are intentionally not freed
           on this path in the original code. */
        return;
    }
    tcg_temp_free_i32(tmp_r1);
    tcg_temp_free_i64(addr);
}
1125
/* Translate one instruction from the 0xb2 opcode block.  OP is the second
   opcode byte (bits 16-23 of the 4-byte instruction).  RRE-format insns
   take R1/R2 from the low byte of INSN; S-format insns re-decode the
   base/displacement operand via decode_rs().  */
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
                     uint32_t insn)
{
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    int r1, r2;
#ifndef CONFIG_USER_ONLY
    int r3, d2, b2;
#endif

    /* Pre-extract the RRE register fields; harmless for S-format ops.  */
    r1 = (insn >> 4) & 0xf;
    r2 = insn & 0xf;

    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);

    switch (op) {
    case 0x22: /* IPM R1 [RRE] */
        /* Insert Program Mask: needs the CC materialized first.  */
        tmp32_1 = tcg_const_i32(r1);
        gen_op_calc_cc(s);
        gen_helper_ipm(cpu_env, cc_op, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x41: /* CKSM R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        gen_op_movi_cc(s, 0);
        break;
    case 0x4e: /* SAR R1,R2 [RRE] */
        /* Set Access register A[r1] from GR r2.  */
        tmp32_1 = load_reg32(r2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x4f: /* EAR R1,R2 [RRE] */
        /* Extract Access register A[r2] into GR r1.  */
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x54: /* MVPG R1,R2 [RRE] */
        /* Move Page; GR0 carries the access-key/CCO flags.  */
        tmp = load_reg(0);
        tmp2 = load_reg(r1);
        tmp3 = load_reg(r2);
        potential_page_fault(s);
        gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        /* XXX check CCO bit and set CC accordingly */
        gen_op_movi_cc(s, 0);
        break;
    case 0x55: /* MVST R1,R2 [RRE] */
        /* Move String; GR0 holds the terminating byte.  */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        gen_op_movi_cc(s, 1);
        break;
    case 0x5d: /* CLST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x5e: /* SRST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;

#ifndef CONFIG_USER_ONLY
    /* Everything below is privileged and only exists in system emulation. */
    case 0x02: /* STIDP D2(B2) [S] */
        /* Store CPU ID */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stidp(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x04: /* SCK D2(B2) [S] */
        /* Set Clock */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sck(cc_op, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x05: /* STCK D2(B2) [S] */
        /* Store Clock -- note: not privileged.  */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stck(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x06: /* SCKC D2(B2) [S] */
        /* Set Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x07: /* STCKC D2(B2) [S] */
        /* Store Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x08: /* SPT D2(B2) [S] */
        /* Set CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x09: /* STPT D2(B2) [S] */
        /* Store CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stpt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0a: /* SPKA D2(B2) [S] */
        /* Set PSW Key from Address: key is bits 56-59 of the address,
           moved into the PSW key field inline (no helper needed).  */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
        tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
        tcg_gen_or_i64(psw_mask, tmp2, tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0d: /* PTLB [S] */
        /* Purge TLB */
        check_privileged(s);
        gen_helper_ptlb(cpu_env);
        break;
    case 0x10: /* SPX D2(B2) [S] */
        /* Set Prefix Register */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spx(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x11: /* STPX D2(B2) [S] */
        /* Store Prefix */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x12: /* STAP D2(B2) [S] */
        /* Store CPU Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x21: /* IPTE R1,R2 [RRE] */
        /* Invalidate PTE */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r1);
        tmp2 = load_reg(r2);
        gen_helper_ipte(cpu_env, tmp, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x29: /* ISKE R1,R2 [RRE] */
        /* Insert Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r2);
        tmp2 = tcg_temp_new_i64();
        gen_helper_iske(tmp2, cpu_env, tmp);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Reset Reference Bit Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x2b: /* SSKE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_sske(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x34: /* STCH ? */
        /* Store Subchannel -- not implemented; CC 3 = not operational.  */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call -- not implemented; CC 3.  */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x78: /* STCKE D2(B2) [S] */
        /* Store Clock Extended -- not privileged.  */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x79: /* SACF D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->pc = s->next_pc;
        update_psw_addr(s);
        s->is_jmp = DISAS_JUMP;
        break;
    case 0x7d: /* STSI D2(B2) [S] */
        /* Store System Information; function code in GR0, selector in GR1. */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x9d: /* LFPC D2(B2) [S] */
        /* Load FP Control register from storage.  */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0xb1: /* STFL D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0xb2: /* LPSWE D2(B2) [S] */
        /* Load PSW Extended: 16-byte operand, mask then address.  */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        break;
    case 0x20: /* SERVC R1,R2 [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        tmp = load_reg(r1);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
#endif
    default:
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
1501
/* Translate one instruction from the 0xb3 opcode block (binary floating
   point, RRE/RRF formats).  M3 is the third nibble of the instruction:
   for RRF insns it actually holds R1 (see the comment at case 0xe), for
   the convert-to-fixed insns it is the rounding mode.  */
static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
                     int r1, int r2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
/* Emit a call to helper I with (r1, r2) register-number arguments.  */
#define FP_HELPER(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

/* Same, for helpers that also return a condition code.  */
#define FP_HELPER_CC(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
    set_cc_static(s); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

    switch (op) {
    case 0x0: /* LPEBR R1,R2 [RRE] */
        FP_HELPER_CC(lpebr);
        break;
    case 0x2: /* LTEBR R1,R2 [RRE] */
        FP_HELPER_CC(ltebr);
        break;
    case 0x3: /* LCEBR R1,R2 [RRE] */
        FP_HELPER_CC(lcebr);
        break;
    case 0x4: /* LDEBR R1,R2 [RRE] */
        FP_HELPER(ldebr);
        break;
    case 0x5: /* LXDBR R1,R2 [RRE] */
        FP_HELPER(lxdbr);
        break;
    case 0x9: /* CEBR R1,R2 [RRE] */
        FP_HELPER_CC(cebr);
        break;
    case 0xa: /* AEBR R1,R2 [RRE] */
        FP_HELPER_CC(aebr);
        break;
    case 0xb: /* SEBR R1,R2 [RRE] */
        FP_HELPER_CC(sebr);
        break;
    case 0xd: /* DEBR R1,R2 [RRE] */
        FP_HELPER(debr);
        break;
    case 0x10: /* LPDBR R1,R2 [RRE] */
        FP_HELPER_CC(lpdbr);
        break;
    case 0x12: /* LTDBR R1,R2 [RRE] */
        FP_HELPER_CC(ltdbr);
        break;
    case 0x13: /* LCDBR R1,R2 [RRE] */
        FP_HELPER_CC(lcdbr);
        break;
    case 0x15: /* SQDBR R1,R2 [RRE] */
        FP_HELPER(sqdbr);
        break;
    case 0x17: /* MEEBR R1,R2 [RRE] */
        FP_HELPER(meebr);
        break;
    case 0x19: /* CDBR R1,R2 [RRE] */
        FP_HELPER_CC(cdbr);
        break;
    case 0x1a: /* ADBR R1,R2 [RRE] */
        FP_HELPER_CC(adbr);
        break;
    case 0x1b: /* SDBR R1,R2 [RRE] */
        FP_HELPER_CC(sdbr);
        break;
    case 0x1c: /* MDBR R1,R2 [RRE] */
        FP_HELPER(mdbr);
        break;
    case 0x1d: /* DDBR R1,R2 [RRE] */
        FP_HELPER(ddbr);
        break;
    case 0xe: /* MAEBR R1,R3,R2 [RRF] */
    case 0x1e: /* MADBR R1,R3,R2 [RRF] */
    case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
        /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
        tmp32_1 = tcg_const_i32(m3);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(r1);
        switch (op) {
        case 0xe:
            gen_helper_maebr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        case 0x1e:
            gen_helper_madbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        case 0x1f:
            gen_helper_msdbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x40: /* LPXBR R1,R2 [RRE] */
        FP_HELPER_CC(lpxbr);
        break;
    case 0x42: /* LTXBR R1,R2 [RRE] */
        FP_HELPER_CC(ltxbr);
        break;
    case 0x43: /* LCXBR R1,R2 [RRE] */
        FP_HELPER_CC(lcxbr);
        break;
    case 0x44: /* LEDBR R1,R2 [RRE] */
        FP_HELPER(ledbr);
        break;
    case 0x45: /* LDXBR R1,R2 [RRE] */
        FP_HELPER(ldxbr);
        break;
    case 0x46: /* LEXBR R1,R2 [RRE] */
        FP_HELPER(lexbr);
        break;
    case 0x49: /* CXBR R1,R2 [RRE] */
        FP_HELPER_CC(cxbr);
        break;
    case 0x4a: /* AXBR R1,R2 [RRE] */
        FP_HELPER_CC(axbr);
        break;
    case 0x4b: /* SXBR R1,R2 [RRE] */
        FP_HELPER_CC(sxbr);
        break;
    case 0x4c: /* MXBR R1,R2 [RRE] */
        FP_HELPER(mxbr);
        break;
    case 0x4d: /* DXBR R1,R2 [RRE] */
        FP_HELPER(dxbr);
        break;
    case 0x65: /* LXR R1,R2 [RRE] */
        /* 128-bit FP register copy: move both halves of the pair.  */
        tmp = load_freg(r2);
        store_freg(r1, tmp);
        tcg_temp_free_i64(tmp);
        tmp = load_freg(r2 + 2);
        store_freg(r1 + 2, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x74: /* LZER R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzer(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x75: /* LZDR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzdr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x76: /* LZXR R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_helper_lzxr(cpu_env, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x84: /* SFPC R1 [RRE] */
        /* Set FP Control register from GR r1.  */
        tmp32_1 = load_reg32(r1);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x94: /* CEFBR R1,R2 [RRE] */
    case 0x95: /* CDFBR R1,R2 [RRE] */
    case 0x96: /* CXFBR R1,R2 [RRE] */
        /* Convert from 32-bit fixed to short/long/extended BFP.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = load_reg32(r2);
        switch (op) {
        case 0x94:
            gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
            break;
        case 0x95:
            gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
            break;
        case 0x96:
            gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x98: /* CFEBR R1,R2 [RRE] */
    case 0x99: /* CFDBR R1,R2 [RRE] */
    case 0x9a: /* CFXBR R1,R2 [RRE] */
        /* Convert to 32-bit fixed; m3 is the rounding mode.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        switch (op) {
        case 0x98:
            gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        case 0x99:
            gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        case 0x9a:
            gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
            break;
        default:
            tcg_abort();
        }
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xa4: /* CEGBR R1,R2 [RRE] */
    case 0xa5: /* CDGBR R1,R2 [RRE] */
        /* Convert from 64-bit fixed to short/long BFP.  */
        tmp32_1 = tcg_const_i32(r1);
        tmp = load_reg(r2);
        switch (op) {
        case 0xa4:
            gen_helper_cegbr(cpu_env, tmp32_1, tmp);
            break;
        case 0xa5:
            gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
            break;
        default:
            tcg_abort();
        }
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0xa6: /* CXGBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp = load_reg(r2);
        gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0xa8: /* CGEBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xa9: /* CGDBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0xaa: /* CGXBR R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        tmp32_3 = tcg_const_i32(m3);
        gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    default:
        LOG_DISAS("illegal b3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }

#undef FP_HELPER_CC
#undef FP_HELPER
}
1774
1775 static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
1776 int r2)
1777 {
1778 TCGv_i64 tmp;
1779 TCGv_i32 tmp32_1;
1780
1781 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1782 switch (op) {
1783 case 0x83: /* FLOGR R1,R2 [RRE] */
1784 tmp = load_reg(r2);
1785 tmp32_1 = tcg_const_i32(r1);
1786 gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
1787 set_cc_static(s);
1788 tcg_temp_free_i64(tmp);
1789 tcg_temp_free_i32(tmp32_1);
1790 break;
1791 default:
1792 LOG_DISAS("illegal b9 operation 0x%x\n", op);
1793 gen_illegal_opcode(s);
1794 break;
1795 }
1796 }
1797
1798 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
1799 {
1800 unsigned char opc;
1801 uint64_t insn;
1802 int op, r1, r2, r3, d2, x2, b2, r1b;
1803
1804 opc = cpu_ldub_code(env, s->pc);
1805 LOG_DISAS("opc 0x%x\n", opc);
1806
1807 switch (opc) {
1808 case 0xb2:
1809 insn = ld_code4(env, s->pc);
1810 op = (insn >> 16) & 0xff;
1811 disas_b2(env, s, op, insn);
1812 break;
1813 case 0xb3:
1814 insn = ld_code4(env, s->pc);
1815 op = (insn >> 16) & 0xff;
1816 r3 = (insn >> 12) & 0xf; /* aka m3 */
1817 r1 = (insn >> 4) & 0xf;
1818 r2 = insn & 0xf;
1819 disas_b3(env, s, op, r3, r1, r2);
1820 break;
1821 case 0xb9:
1822 insn = ld_code4(env, s->pc);
1823 r1 = (insn >> 4) & 0xf;
1824 r2 = insn & 0xf;
1825 op = (insn >> 16) & 0xff;
1826 disas_b9(env, s, op, r1, r2);
1827 break;
1828 case 0xed:
1829 insn = ld_code6(env, s->pc);
1830 debug_insn(insn);
1831 op = insn & 0xff;
1832 r1 = (insn >> 36) & 0xf;
1833 x2 = (insn >> 32) & 0xf;
1834 b2 = (insn >> 28) & 0xf;
1835 d2 = (short)((insn >> 16) & 0xfff);
1836 r1b = (insn >> 12) & 0xf;
1837 disas_ed(env, s, op, r1, x2, b2, d2, r1b);
1838 break;
1839 default:
1840 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
1841 gen_illegal_opcode(s);
1842 break;
1843 }
1844 }
1845
/* ====================================================================== */
/* Define the insn format enumeration. */
/* Each F<n> macro turns an entry of insn-format.def into a single FMT_xxx
   enumerator; the field descriptors X1..Xn are discarded here and reused
   further below to build format_info[].  */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
1865
1866 /* Define a structure to hold the decoded fields. We'll store each inside
1867 an array indexed by an enum. In order to conserve memory, we'll arrange
1868 for fields that do not exist at the same time to overlap, thus the "C"
1869 for compact. For checking purposes there is an "O" for original index
1870 as well that will be applied to availability bitmaps. */
1871
/* "Original" field indices, one per distinct field name that can appear
   in an instruction format.  Used as bit positions in DisasFields.presentO,
   so the enum must stay below 32 entries.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
1894
/* "Compact" field indices: fields that can never occur in the same format
   share a storage slot in DisasFields.c[], keeping the array small.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1925
/* The decoded fields of one instruction.  presentO/presentC are bitmaps
   of which original/compact fields were extracted; c[] holds the values,
   indexed by DisasFieldIndexC.  */
struct DisasFields {
    unsigned op:8;          /* primary opcode byte */
    unsigned op2:8;         /* secondary opcode byte, if the format has one */
    unsigned presentC:16;   /* bitmap over DisasFieldIndexC */
    unsigned int presentO;  /* bitmap over DisasFieldIndexO */
    int c[NUM_C_FIELD];     /* field values, compact-indexed */
};
1933
/* This is the way fields are to be accessed out of DisasFields. */
/* F is the bare field name (r1, d2, ...); the macros paste on the
   FLD_O_/FLD_C_ prefixes so callers cannot mix up the two index spaces.  */
#define have_field(S, F) have_field1((S), FLD_O_##F)
#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1937
1938 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1939 {
1940 return (f->presentO >> c) & 1;
1941 }
1942
/* Return the value of field O, stored in compact slot C.  The field must
   have been extracted (asserted), otherwise the slot may hold another
   field that shares it.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1949
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;            /* first bit of the field in the insn */
    unsigned int size:8;           /* width in bits */
    unsigned int type:2;           /* extraction kind; see the field macros
                                      below (0 plain, 1 immediate, 2 long
                                      displacement) -- extraction code is
                                      elsewhere in this file, confirm there */
    unsigned int indexC:6;         /* compact storage slot */
    enum DisasFieldIndexO indexO:8; /* original field index */
} DisasField;

/* All fields of one instruction format; unused slots are zero.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1962
/* Field-layout macros, each expanding to one or more DisasField
   initializers { beg, size, type, indexC, indexO }.  R/M are 4-bit
   register/mask fields, BD/BXD are base(+index)+12-bit displacement,
   BDL/BXDL use the 20-bit long displacement (type 2), I is a signed
   immediate (type 1), L a length field.  */
#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

/* Second expansion of insn-format.def: this time each F<n> entry becomes
   the DisasFormatInfo for that format, indexed by the DisasFormat enum
   built above (the two expansions must stay in sync).  */
#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
2003
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    /* g_* flags mark operands that alias TCG globals (e.g. a register
       itself rather than a copy) -- presumably so they are not freed or
       clobbered; confirm against the operand cleanup code.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
2012
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
2030
/* Architecture facility an instruction belongs to; used to gate decode
   on the configured CPU model.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
2053
/* Static description of one instruction in the new-style decode table:
   its opcode, format, required facility, and the pipeline of optional
   callbacks that load inputs, do the work, and write back outputs.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode bytes, as matched by the decoder */
    DisasFormat fmt:6;      /* instruction format (field layout) */
    DisasFacility fac:6;    /* facility that must be installed */

    const char *name;

    /* Optional stages; any may be NULL.  in1/in2 load source operands,
       prep sets up outputs, wout stores results, cout computes the CC.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* per-insn constant available to the stages */
};
2070
2071 /* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */
2073
2074 static void help_l2_shift(DisasContext *s, DisasFields *f,
2075 DisasOps *o, int mask)
2076 {
2077 int b2 = get_field(f, b2);
2078 int d2 = get_field(f, d2);
2079
2080 if (b2 == 0) {
2081 o->in2 = tcg_const_i64(d2 & mask);
2082 } else {
2083 o->in2 = get_address(s, 0, b2, d2);
2084 tcg_gen_andi_i64(o->in2, o->in2, mask);
2085 }
2086 }
2087
2088 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
2089 {
2090 if (dest == s->next_pc) {
2091 return NO_EXIT;
2092 }
2093 if (use_goto_tb(s, dest)) {
2094 gen_update_cc_op(s);
2095 tcg_gen_goto_tb(0);
2096 tcg_gen_movi_i64(psw_addr, dest);
2097 tcg_gen_exit_tb((tcg_target_long)s->tb);
2098 return EXIT_GOTO_TB;
2099 } else {
2100 tcg_gen_movi_i64(psw_addr, dest);
2101 return EXIT_PC_UPDATED;
2102 }
2103 }
2104
/* Emit a conditional branch.  C describes the condition and is consumed.
   IS_IMM selects between a PC-relative target (IMM, in halfwords) and a
   register target CDEST.  Chooses among: no code (never taken / branch to
   next), direct goto, dual goto_tb, single goto_tb, or movcond.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken: psw_addr already holds the destination for
               the register case; store it now for the immediate case.  */
            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* 32-bit compare: widen the setcond result so a single
               64-bit movcond can select the new PC.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
2229
2230 /* ====================================================================== */
2231 /* The operations. These perform the bulk of the work for any insn,
2232 usually after the operands have been loaded and output initialized. */
2233
/* Absolute value: out = |in2|.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}
2239
/* Addition: out = in1 + in2.  Any CC computation is a separate stage.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2245
2246 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
2247 {
2248 TCGv_i64 cc;
2249
2250 tcg_gen_add_i64(o->out, o->in1, o->in2);
2251
2252 /* XXX possible optimization point */
2253 gen_op_calc_cc(s);
2254 cc = tcg_temp_new_i64();
2255 tcg_gen_extu_i32_i64(cc, cc_op);
2256 tcg_gen_shri_i64(cc, cc, 1);
2257
2258 tcg_gen_add_i64(o->out, o->out, cc);
2259 tcg_temp_free_i64(cc);
2260 return NO_EXIT;
2261 }
2262
/* Bitwise AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2268
/* AND immediate into one field of a register (NIHH/NIHL/NILH/NILL etc.).
   insn->data packs the field position: low byte = bit shift, upper bits =
   field size in bits.  NOTE(review): assumes size < 64, else the mask
   shift is UB -- confirm against the decode table.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must not alias a global.  */
    assert(!o->g_in2);
    /* Move the immediate into position and set all other bits, so the
       AND leaves everything outside the field unchanged.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2285
/* BRANCH AND SAVE: store the link information, then branch to the
   address in in2.  With r2 = 0 the input is unused and no branch is
   taken (link register is still written).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* BRANCH RELATIVE AND SAVE: link, then direct branch to PC + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* BRANCH ON CONDITION: evaluate the mask m1 against the current CC and
   branch (relative immediate or via in2) when it matches.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
2313
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and branch
   if the result is non-zero.  The comparison value lives in a fresh
   temp (g1 = false) since only the low half of the register is used.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch if the
   result is non-zero.  The compare operand is the global register
   itself, hence g1 = true so help_branch does not free it.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
2356
/* COMPARE LOGICAL CHARACTER: memory-to-memory unsigned compare of
   l+1 bytes (the L field holds length minus one).  Power-of-two sizes
   are inlined as two loads plus an LTUGTU comparison; anything else
   falls back to the byte-loop helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: helper does the byte loop and sets the CC.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
2390
/* COMPARE LOGICAL LONG EXTENDED: fully delegated to a helper, which
   updates the register pairs and produces the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE LOGICAL CHARACTERS UNDER MASK: the helper compares the bytes
   of in1 selected by mask m3 against successive bytes at in2.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2415
/* COMPARE AND SWAP (32-bit): helper compares in1 with the word at in2
   and conditionally stores r3; CC reports success/failure.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE AND SWAP (64-bit): as op_cs but on doublewords.  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE DOUBLE AND SWAP (32-bit pair): assemble the r3/r3+1 even/odd
   register pair into one 64-bit value (r3 in the high half) and reuse
   the 64-bit CSG helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE DOUBLE AND SWAP (64-bit pair, 128-bit operand): helper only.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
2456
/* CONVERT TO DECIMAL: helper converts the low 32 bits of in1 to a
   packed-decimal doubleword, which is then stored at address in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2468
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypercall-style instruction; the function code
   comes from the displacement field, parameters from r1/r2.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
#endif
2484
/* DIVIDE (signed 32-bit): helper returns quotient/remainder; the second
   half of the 128-bit helper result is fetched via return_low128.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (unsigned 32-bit).  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE SINGLE (signed 64-bit).  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (64-bit): NOTE(review) unlike the others this passes
   o->out/o->out2 as inputs — presumably the 128-bit dividend set up by
   the prep/in hooks; confirm against the insn table.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* EXTRACT FPC: read the floating-point control register from env.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2518
/* EXECUTE: run the instruction at in2, modified by bits of in1, via a
   helper.  PSW address and CC must be up to date before the call since
   the executed instruction may inspect or trap on them.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2544
2545 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2546 {
2547 int m3 = get_field(s->fields, m3);
2548 int pos, len, base = s->insn->data;
2549 TCGv_i64 tmp = tcg_temp_new_i64();
2550 uint64_t ccm;
2551
2552 switch (m3) {
2553 case 0xf:
2554 /* Effectively a 32-bit load. */
2555 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2556 len = 32;
2557 goto one_insert;
2558
2559 case 0xc:
2560 case 0x6:
2561 case 0x3:
2562 /* Effectively a 16-bit load. */
2563 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2564 len = 16;
2565 goto one_insert;
2566
2567 case 0x8:
2568 case 0x4:
2569 case 0x2:
2570 case 0x1:
2571 /* Effectively an 8-bit load. */
2572 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2573 len = 8;
2574 goto one_insert;
2575
2576 one_insert:
2577 pos = base + ctz32(m3) * 8;
2578 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2579 ccm = ((1ull << len) - 1) << pos;
2580 break;
2581
2582 default:
2583 /* This is going to be a sequence of loads and inserts. */
2584 pos = base + 32 - 8;
2585 ccm = 0;
2586 while (m3) {
2587 if (m3 & 0x8) {
2588 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2589 tcg_gen_addi_i64(o->in2, o->in2, 1);
2590 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2591 ccm |= 0xff << pos;
2592 }
2593 m3 = (m3 << 1) & 0xf;
2594 pos -= 8;
2595 }
2596 break;
2597 }
2598
2599 tcg_gen_movi_i64(tmp, ccm);
2600 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2601 tcg_temp_free_i64(tmp);
2602 return NO_EXIT;
2603 }
2604
/* INSERT IMMEDIATE: deposit in2 into in1 at the bit field described by
   insn->data, packed as (size << 8) | shift.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}

/* LOAD LOGICAL THIRTY-ONE BITS: keep only the low 31 bits of in2.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2618
/* Memory loads of each width/signedness; in2 holds the address.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2660
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; helper loads control registers
   r1..r3 from memory at in2.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit): as op_lctl but doubleword control values.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD REAL ADDRESS: privileged address translation; sets the CC.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PSW: load a short (ESA) PSW from memory and install it; control
   never returns to the current translation block.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
2713
/* LOAD ACCESS MULTIPLE: helper loads access registers r1..r3 from in2.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD MULTIPLE (32-bit): load words into the low halves of registers
   r1 through r3, wrapping the register number modulo 16.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE HIGH: as op_lm32 but into the high register halves.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE (64-bit): load doublewords directly into the full
   registers r1 through r3, wrapping modulo 16.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2787
/* Generic register move: hand the in2 temp over to out and mark in2
   unused so the common epilogue does not free it twice (ownership
   transfer, including the global/g_* flag).  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* Pair move: transfer both inputs to the output pair, same ownership
   handoff as op_mov2.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2808
/* MOVE CHARACTER: helper copies l1+1 bytes from in2 to addr1.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: helper-driven; updates the r1/r2 register pairs and CC.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as op_mvcl with an extra operand register.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2841
#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-address-space copy.  Note the R1
   value is encoded in the L1 field of the SS format.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: mirror of op_mvcp in the other direction.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
2863
/* MULTIPLY: out = in1 * in2 (low 64 bits).  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY producing a 128-bit result; low half via return_low128.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD NEGATIVE: out = -|in2|, via helper.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* AND CHARACTER: memory-to-memory AND of l1+1 bytes; helper sets CC.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD COMPLEMENT: out = -in2.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* OR CHARACTER: memory-to-memory OR of l1+1 bytes; helper sets CC.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2908
/* OR: out = in1 | in2.  CC handled by the cout hook.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR IMMEDIATE (OIxx family): OR the immediate into one sub-field of
   the register; insn->data packs (size << 8) | shift.  Shifting the
   immediate into place is sufficient — its other bits are zero.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2930
/* Byte-reverse the low 16/32/full 64 bits of in2 (LRVx family).  */

static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2948
/* ROTATE LEFT (32-bit): rotate the low 32 bits of in1 by in2, with the
   result zero-extended into the 64-bit output.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* ROTATE LEFT (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2969
#ifndef CONFIG_USER_ONLY
/* SIGNAL PROCESSOR: privileged inter-CPU signalling, done in a helper
   which also produces the CC.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
2981
/* SHIFT LEFT ARITHMETIC: insn->data is the sign-bit position (31 or
   63), which also selects the 32- vs 64-bit CC computation.
   NOTE(review): in1 is modified below without a g_in1 assert —
   presumably the in-hooks always supply a temp here; confirm.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* SHIFT LEFT LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT ARITHMETIC.  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3013
#ifndef CONFIG_USER_ONLY
/* SET SYSTEM MASK: privileged; replace the top 8 bits of the PSW mask
   with in2.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit): privileged; helper stores control registers
   r1..r3 to memory at in2.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit).  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE THEN AND/OR SYSTEM MASK (STNSM opcode 0xac / STOSM): store the
   current system-mask byte, then AND or OR the immediate into it.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
#endif
3070
/* Memory stores of each width; in1 is the value, in2 the address.  */

static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* STORE ACCESS MULTIPLE: helper stores access registers r1..r3.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3105
/* STORE CHARACTERS UNDER MASK: store the register bytes selected by m3
   to successive memory bytes.  insn->data is the bit offset of the
   32-bit field (STCM vs STCMH).  Contiguous masks become one store of
   the matching width; sparse masks become a shift/store sequence.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3154
/* STORE MULTIPLE: store registers r1..r3 (wrapping modulo 16) to
   memory; insn->data selects the element size (4 or 8 bytes).  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}

/* STORE MULTIPLE HIGH: store the high 32 bits of registers r1..r3,
   obtained by shifting each register down by 32.  */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
3202
/* SUBTRACT: out = in1 - in2.  CC handled by the cout hook.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: out = in1 + ~in2 + borrow-in, where the
   incoming borrow is recovered from the current CC as cc >> 1.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3226
/* SUPERVISOR CALL: record the SVC number and instruction length in env,
   then raise the SVC exception; does not return to this TB.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* PSW address and CC must be committed before the exception.  */
    update_psw_addr(s);
    gen_op_calc_cc(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3245
#ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: helper probes access to addr1 and produces the CC.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
3255
/* TRANSLATE: helper maps l1+1 bytes at addr1 through the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK: helper converts packed decimal at in2 to zoned at addr1.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* EXCLUSIVE OR CHARACTER: memory-to-memory XOR of l1+1 bytes; CC set
   by the helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3284
/* EXCLUSIVE OR: out = in1 ^ in2.  CC handled by the cout hook.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* EXCLUSIVE OR IMMEDIATE (XIxx family): XOR the immediate into one
   sub-field; insn->data packs (size << 8) | shift.  Shifting into
   place suffices since the immediate's other bits are zero.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3306
3307 /* ====================================================================== */
3308 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3309 the original inputs), update the various cc data structures in order to
3310 be able to compute the new condition code. */
3311
/* Each cout_* hook records the inputs/output of the just-executed
   operation together with the matching CC_OP_* discriminator, so the
   condition code can be computed lazily later.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Non-zero test of the low 32 bits only; zero-extend before NZ.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3452
3453 /* ====================================================================== */
3454 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3455 with the TCG register to which we will write. Used in combination with
3456 the "wout" generators, in some cases we need a new temporary, and in
3457 some cases we can write to a TCG global. */
3458
/* Allocate a fresh temporary as the output of the operation.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

/* Allocate a fresh temporary pair for a double-width output.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}

/* Have the operation write directly to the R1 global; mark it global
   so it is not freed after the insn.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}

/* Have the operation write directly to the even/odd pair R1/R1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}
3484
3485 /* ====================================================================== */
3486 /* The "Write OUTput" generators. These generally perform some non-trivial
3487 copy of data to TCG globals, or to main memory. The trivial cases are
3488 generally handled by having a "prep" generator install the TCG global
3489 as the destination of the operation. */
3490
/* Store the full 64-bit output into general register R1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

/* Insert the low 8 bits of the output into R1, leaving the rest intact.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

/* Insert the low 16 bits of the output into R1, leaving the rest intact.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}

/* Store the low 32 bits of the output into R1; high half preserved.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}

/* Store two 32-bit outputs into the even/odd pair R1/R1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

/* Split the 64-bit output across the pair: low half to R1+1,
   high half to R1.  Note OUT is clobbered by the shift.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

/* Store a 32-bit (short) float result into float register R1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}

/* Store a 64-bit (long) float result into float register R1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
3540 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3541 {
3542 int f1 = get_field(s->fields, r1);
3543 store_freg(f1, o->out);
3544 store_freg((f1 + 2) & 15, o->out2);
3545 }
3546
/* Store the 32-bit output into R1 only when R1 != R2 (used by insns
   where R1 == R2 makes the copy a no-op, e.g. load-and-test forms).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

/* As above, but for a short float result into float register R1.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}

/* Store the low 8 bits of the output to memory at ADDR1.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

/* Store the low 16 bits of the output to memory at ADDR1.  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

/* Store the low 32 bits of the output to memory at ADDR1.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

/* Store the full 64-bit output to memory at ADDR1.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}

/* Store the low 32 bits of the output to memory at the address in IN2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3585
3586 /* ====================================================================== */
3587 /* The "INput 1" generators. These load the first operand to an insn. */
3588
/* Load general register R1 into a fresh temporary as IN1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

/* Use the R1 global directly as IN1 (no copy; marked global so it is
   not freed after the insn).  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

/* IN1 = sign-extended low 32 bits of R1.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

/* IN1 = zero-extended low 32 bits of R1.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

/* IN1 = high 32 bits of R1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}

/* IN1 = the odd register of the R1 pair (R1+1).  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* IN1 = sign-extended low 32 bits of R1+1.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* IN1 = zero-extended low 32 bits of R1+1.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* IN1 = 64-bit value formed from the 32-bit pair: R1 is the high
   half, R1+1 the low half.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}

/* IN1 = copy of general register R2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* IN1 = copy of general register R3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

/* Use the R3 global directly as IN1 (no copy).  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

/* IN1 = sign-extended low 32 bits of R3.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

/* IN1 = zero-extended low 32 bits of R3.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

/* IN1 = the short (32-bit) float in float register R1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

/* Use the float register R1 global directly as IN1 (no copy).  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
3687
/* Compute the first-operand effective address from B1+D1 into ADDR1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

/* Compute an effective address from the second-operand fields
   (X2+B2+D2) into ADDR1.  X2 is optional in some formats.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* IN1 = zero-extended byte loaded from the first-operand address.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

/* IN1 = sign-extended halfword loaded from the first-operand address.  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

/* IN1 = zero-extended halfword loaded from the first-operand address.  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

/* IN1 = sign-extended word loaded from the first-operand address.  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

/* IN1 = zero-extended word loaded from the first-operand address.  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

/* IN1 = doubleword loaded from the first-operand address.  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3740
3741 /* ====================================================================== */
3742 /* The "INput 2" generators. These load the second operand to an insn. */
3743
/* Use the R1 global directly as IN2 (no copy; marked global).  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}

/* IN2 = zero-extended low 16 bits of R1.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}

/* IN2 = zero-extended low 32 bits of R1.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}

/* IN2 = copy of general register R2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}

/* Use the R2 global directly as IN2 (no copy).  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

/* IN2 = copy of R2, but only when R2 != 0; otherwise IN2 is left
   unset (callers treat r0 as "no operand", e.g. branch insns).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}

/* IN2 = sign-extended low 8 bits of R2.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = zero-extended low 8 bits of R2.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = sign-extended low 16 bits of R2.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = zero-extended low 16 bits of R2.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = copy of general register R3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

/* IN2 = sign-extended low 32 bits of R2.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = zero-extended low 32 bits of R2.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

/* IN2 = the short (32-bit) float in float register R2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}

/* Use the float register R2 global directly as IN2 (no copy).  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Extended (128-bit) float operand: loads BOTH halves, F2 into IN1
   and F2+2 into IN2, directly from the float register globals.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f2 = get_field(f, r2);
    o->in1 = fregs[f2];
    o->in2 = fregs[(f2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}

/* IN2 = the second-operand effective address (X2+B2+D2).  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
3846
/* IN2 = PC-relative address: current PC plus the I2 immediate scaled
   by 2 (relative offsets are in halfword units).  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

/* IN2 = shift amount for 32-bit shifts (masked to 0..31).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

/* IN2 = shift amount for 64-bit shifts (masked to 0..63).  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
3861
/* The in2_m2_* helpers compute the second-operand address with
   in2_a2 and then load through it in place; in2_mri2_* do the same
   with a PC-relative address from in2_ri2.  In each case the address
   temporary in IN2 is reused to hold the loaded value.  */

/* IN2 = zero-extended byte at the second-operand address.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = sign-extended halfword at the second-operand address.  */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = zero-extended halfword at the second-operand address.  */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = sign-extended word at the second-operand address.  */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = zero-extended word at the second-operand address.  */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = doubleword at the second-operand address.  */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = zero-extended halfword at the PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = sign-extended word at the PC-relative address.  */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = zero-extended word at the PC-relative address.  */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* IN2 = doubleword at the PC-relative address.  */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
3921
/* IN2 = the I2 immediate as extracted (already sign-extended by the
   field extractor where the format calls for it).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

/* IN2 = I2 truncated/zero-extended to 8 bits.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

/* IN2 = I2 truncated/zero-extended to 16 bits.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

/* IN2 = I2 truncated/zero-extended to 32 bits.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

/* IN2 = 16-bit unsigned I2 shifted left by the per-insn data amount
   (used by the IILL/IILH-style insert-immediate family).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

/* IN2 = 32-bit unsigned I2 shifted left by the per-insn data amount.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
3953
3954 /* ====================================================================== */
3955
3956 /* Find opc within the table of insns. This is formulated as a switch
3957 statement so that (1) we get compile-time notice of cut-paste errors
3958 for duplicated opcodes, and (2) the compiler generates the binary
3959 search tree, rather than us having to post-process the table. */
3960
/* C() is the common table entry; it forwards to D() with a zero
   per-insn data value.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of the insn table: generate one enumerator per
   insn, used below to index insn_info[].  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};
3969
/* Second expansion: generate a DisasInsn initializer per insn,
   wiring up the helper functions named in the table.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
3996
/* Third expansion: one switch case per insn, mapping the combined
   (major << 8 | minor) opcode to its insn_info entry.  Formulating
   the lookup as a switch lets the compiler build the search tree and
   diagnose duplicate opcodes at compile time.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Return the insn descriptor for OPC, or NULL if not in the table.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4012
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
4016
4017 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4018 {
4019 uint32_t r, m;
4020
4021 if (f->size == 0) {
4022 return;
4023 }
4024
4025 /* Zero extract the field from the insn. */
4026 r = (insn << f->beg) >> (64 - f->size);
4027
4028 /* Sign-extend, or un-swap the field as necessary. */
4029 switch (f->type) {
4030 case 0: /* unsigned */
4031 break;
4032 case 1: /* signed */
4033 assert(f->size <= 32);
4034 m = 1u << (f->size - 1);
4035 r = (r ^ m) - m;
4036 break;
4037 case 2: /* dl+dh split, signed 20 bit. */
4038 r = ((int8_t)r << 12) | (r >> 8);
4039 break;
4040 default:
4041 abort();
4042 }
4043
4044 /* Validate that the "compressed" encoding we selected above is valid.
4045 I.e. we havn't make two different original fields overlap. */
4046 assert(((o->presentC >> f->indexC) & 1) == 0);
4047 o->presentC |= 1 << f->indexC;
4048 o->presentO |= 1 << f->indexO;
4049
4050 o->c[f->indexC] = r;
4051 }
4052
4053 /* Lookup the insn at the current PC, extracting the operands into O and
4054 returning the info struct for the insn. Returns NULL for invalid insn. */
4055
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes determine the major opcode and, through it,
       the instruction length (2, 4, or 6 bytes).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the 64-bit word, reading any
       remaining bytes.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Second opcode byte immediately follows the first.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the 4-bit field after the R1 nibble.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* SS formats have no secondary opcode.  */
        op2 = 0;
        break;
    default:
        /* Default: OP2 at bit 40 (last byte of a 6-byte insn).  */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4141
/* Translate one guest instruction at s->pc: decode it, run the
   in/prep/op/wout/cout helper pipeline, release temporaries, and
   advance the PC.  Falls back to the legacy interpreter for insns
   not yet in the new table.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter.  This includes ILLOPC. */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy is_jmp codes onto the new ExitStatus.  */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; NULL entries
       in the table are simply skipped.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  Operands marked
       g_* are TCG globals and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4226
/* Translate a basic block of guest code into TCG ops.  With
   SEARCH_PC set, additionally record per-op PC/cc_op/icount data so
   the CPU state can be reconstructed at an arbitrary op (used for
   exception restart).  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Translation must stop at the page boundary; the next page may
       not be mapped the same way when the TB is re-executed.  */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record restore info for every op slot emitted so far.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this PC ends the TB before the insn so the
           debug exception is raised at the right address.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The exit has already been emitted.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Pad out the restore-info arrays for trailing op slots.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4365
/* Public entry point: translate a TB without PC-search bookkeeping.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

/* Public entry point: translate a TB recording per-op restore info
   (used when restarting after an exception mid-TB).  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4375
/* Restore the CPU state to the op at PC_POS, as recorded by the
   search_pc pass: reload the PSW address and, if the recorded cc_op
   is a concrete one, the condition-code state.  */
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
    cc_op = gen_opc_cc_op[pc_pos];
    /* DYNAMIC/STATIC are placeholders, not real cc state; keep the
       current env->cc_op in that case.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}