/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"

/* global register indexes */
static TCGv_ptr cpu_env;

#include "exec/gen-icount.h"
#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t pc, next_pc;
    enum cc_op cc_op;
    bool singlestep_enabled;
    int is_jmp;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#define DISAS_EXCP 4

static void gen_op_calc_cc(DisasContext *s);

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}

static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
{
    if (!(s->tb->flags & FLAG_MASK_64)) {
        if (s->tb->flags & FLAG_MASK_32) {
            return pc | 0x80000000;
        }
    }
    return pc;
}
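
/*
 * The link value is what branch-and-save style instructions store in
 * their R1 operand: in 31-bit mode the addressing-mode bit is folded
 * into bit 32 of the address (the 0x80000000 above), so e.g. a 4-byte
 * BRAS at 0x1000 would save 0x80001004; otherwise the next-instruction
 * address is returned unchanged.
 */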

void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;

    if (env->cc_op > 3) {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        /* use PRId64 so the format also works on 32-bit hosts */
        cpu_fprintf(f, " %15s = %10" PRId64 "\t%10" PRId64 "\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}

static inline TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static inline TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, fregs[reg]);
    return r;
}

static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}

static inline TCGv_i32 load_reg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(r, regs[reg]);
    return r;
}

static inline TCGv_i64 load_reg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(r, regs[reg]);
    return r;
}

static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}

static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static inline void store_reg16(int reg, TCGv_i32 v)
{
    /* 16 bit register writes keep the upper bytes */
#if HOST_LONG_BITS == 32
    tcg_gen_deposit_i32(TCGV_LOW(regs[reg]), TCGV_LOW(regs[reg]), v, 0, 16);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 16);
#endif
}

static inline void store_reg8(int reg, TCGv_i64 v)
{
    /* 8 bit register writes keep the upper bytes */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 8);
}

static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}

static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
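
/*
 * These three loaders cover all s390x instruction sizes: instructions
 * are 2, 4 or 6 bytes long, with the length encoded in the two top
 * bits of the first opcode byte (00 -> 2 bytes, 01/10 -> 4 bytes,
 * 11 -> 6 bytes).  ld_code6 stitches its result together from a
 * 2-byte and a 4-byte fetch.
 */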

static inline int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        tcg_abort();
        break;
    }
}
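
/*
 * The value returned above is used as the softmmu MMU index for the
 * qemu_ld/st ops below; giving the primary, secondary and home address
 * spaces separate indexes lets their translations coexist in the TLB.
 */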

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    gen_op_calc_cc(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);

    /* End TB here.  */
    s->is_jmp = DISAS_EXCP;
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
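
/*
 * Worked example: for an operand D2(X2,B2) with X2 = r5, B2 = r12 and
 * D2 = 0x123, get_address() emits tmp = 0x123 + r5 + r12.  Register 0
 * never contributes (x2 == 0 or b2 == 0 means "no register"), and
 * outside 64-bit mode the result is truncated to a 31-bit address.
 */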

static void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}

static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}

static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}

static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}

static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}

static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
{
    /* XXX optimize for the constant? put it in s? */
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
    tcg_temp_free_i32(tmp);
}

static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
{
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
    tcg_temp_free_i32(tmp);
}

static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}

static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}

static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_s64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_u64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}

static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}

static void set_cc_icm(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    gen_op_update2_cc_i32(s, CC_OP_ICM, v1, v2);
}

static void set_cc_cmp_f32_i64(DisasContext *s, TCGv_i32 v1, TCGv_i64 v2)
{
    tcg_gen_extu_i32_i64(cc_src, v1);
    tcg_gen_mov_i64(cc_dst, v2);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_LTGT_F32;
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i32 v1)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ_F32, v1);
}

/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}

static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_LTGT_F32:
    case CC_OP_LTGT_F64:
    case CC_OP_SLAG:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
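
/*
 * Condition codes are evaluated lazily: instructions merely record
 * their operands in cc_src/cc_dst/cc_vr and an operation tag in
 * s->cc_op, and the helper call above folds these into the real 0..3
 * value only when it is actually consumed.  E.g. an "AR r1,r2" tagged
 * CC_OP_ADD_32 costs nothing extra if the next instruction simply
 * overwrites the cc again.
 */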

static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}

static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;

    return get_address(s, *x2, *b2, *d2);
}
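
/*
 * Worked example: the 4-byte RX instruction "L r1,3(0,r2)" assembles
 * to 0x58102003.  With the opcode byte in the top bits, decode_rx()
 * extracts r1 = 1, x2 = 0, b2 = 2, d2 = 3 from the remaining nibbles
 * and returns the effective address r2 + 3.
 */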

static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}

static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}

static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}

static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}
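
/*
 * The non-zero argument to tcg_gen_exit_tb() is the address of the
 * current TB plus the jump slot index (0 or 1); the execution loop
 * uses it to patch the goto_tb branch so the two TBs become directly
 * chained, whereas exit_tb(0) always returns to the loop for a full
 * TB lookup.
 */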

static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
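
/*
 * The table index is the 4-bit instruction mask: bit 8 selects CC=0
 * (equal), bit 4 CC=1 (low), bit 2 CC=2 (high) and bit 1 CC=3.  So
 * e.g. "BRC 8,label" after a compare indexes entry 8 and yields
 * TCG_COND_EQ, while masks that include the (never set) CC=3 bit are
 * treated conservatively and take the dynamic path in disas_jcc.
 */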

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_ICM:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

static void gen_op_mvc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 tmp, tmp2;
    int i;
    int l_memset = gen_new_label();
    int l_out = gen_new_label();
    TCGv_i64 dest = tcg_temp_local_new_i64();
    TCGv_i64 src = tcg_temp_local_new_i64();
    TCGv_i32 vl;

    /* Find out if we should use the inline version of mvc */
    switch (l) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 11:
    case 15:
        /* use inline */
        break;
    default:
        /* Fall back to helper */
        vl = tcg_const_i32(l);
        potential_page_fault(s);
        gen_helper_mvc(cpu_env, vl, s1, s2);
        tcg_temp_free_i32(vl);
        return;
    }

    tcg_gen_mov_i64(dest, s1);
    tcg_gen_mov_i64(src, s2);

    if (!(s->tb->flags & FLAG_MASK_64)) {
        /* XXX what if we overflow while moving? */
        tcg_gen_andi_i64(dest, dest, 0x7fffffffUL);
        tcg_gen_andi_i64(src, src, 0x7fffffffUL);
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_addi_i64(tmp, src, 1);
    tcg_gen_brcond_i64(TCG_COND_EQ, dest, tmp, l_memset);
    tcg_temp_free_i64(tmp);

    switch (l) {
    case 0:
        tmp = tcg_temp_new_i64();

        tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
        tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));

        tcg_temp_free_i64(tmp);
        break;
    case 1:
        tmp = tcg_temp_new_i64();

        tcg_gen_qemu_ld16u(tmp, src, get_mem_index(s));
        tcg_gen_qemu_st16(tmp, dest, get_mem_index(s));

        tcg_temp_free_i64(tmp);
        break;
    case 3:
        tmp = tcg_temp_new_i64();

        tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s));
        tcg_gen_qemu_st32(tmp, dest, get_mem_index(s));

        tcg_temp_free_i64(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s));
        tcg_gen_addi_i64(src, src, 4);
        tcg_gen_qemu_ld8u(tmp2, src, get_mem_index(s));
        tcg_gen_qemu_st32(tmp, dest, get_mem_index(s));
        tcg_gen_addi_i64(dest, dest, 4);
        tcg_gen_qemu_st8(tmp2, dest, get_mem_index(s));

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 7:
        tmp = tcg_temp_new_i64();

        tcg_gen_qemu_ld64(tmp, src, get_mem_index(s));
        tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));

        tcg_temp_free_i64(tmp);
        break;
    default:
        /* The inline version can become too big for awkward lengths,
           so only use it for known-good ones.  */
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_const_i64(8);
        for (i = 0; (i + 7) <= l; i += 8) {
            tcg_gen_qemu_ld64(tmp, src, get_mem_index(s));
            tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));

            tcg_gen_add_i64(src, src, tmp2);
            tcg_gen_add_i64(dest, dest, tmp2);
        }

        tcg_temp_free_i64(tmp2);
        tmp2 = tcg_const_i64(1);

        for (; i <= l; i++) {
            tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
            tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));

            tcg_gen_add_i64(src, src, tmp2);
            tcg_gen_add_i64(dest, dest, tmp2);
        }

        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    }

    tcg_gen_br(l_out);

    gen_set_label(l_memset);
    /* memset case (dest == (src + 1)) */

    tmp = tcg_temp_new_i64();
    tmp2 = tcg_temp_new_i64();
    /* fill tmp with the byte */
    tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
    tcg_gen_shli_i64(tmp2, tmp, 8);
    tcg_gen_or_i64(tmp, tmp, tmp2);
    tcg_gen_shli_i64(tmp2, tmp, 16);
    tcg_gen_or_i64(tmp, tmp, tmp2);
    tcg_gen_shli_i64(tmp2, tmp, 32);
    tcg_gen_or_i64(tmp, tmp, tmp2);
    tcg_temp_free_i64(tmp2);

    tmp2 = tcg_const_i64(8);

    for (i = 0; (i + 7) <= l; i += 8) {
        tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));
        tcg_gen_addi_i64(dest, dest, 8);
    }

    tcg_temp_free_i64(tmp2);
    tmp2 = tcg_const_i64(1);

    for (; i <= l; i++) {
        tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));
        tcg_gen_addi_i64(dest, dest, 1);
    }

    tcg_temp_free_i64(tmp2);
    tcg_temp_free_i64(tmp);

    gen_set_label(l_out);

    tcg_temp_free(dest);
    tcg_temp_free(src);
}
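
/*
 * The dest == src + 1 branch above implements the classic MVC overlap
 * idiom: MVC is defined to move byte by byte, left to right, so e.g.
 * "MVC 1(255,R1),0(R1)" propagates the byte at 0(R1) through the
 * following 255 bytes, which is why that case is translated as a
 * memset-style fill rather than a copy.
 */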

static void gen_op_clc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 tmp;
    TCGv_i64 tmp2;
    TCGv_i32 vl;

    /* check for simple 32bit or 64bit match */
    switch (l) {
    case 0:
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld8u(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld8u(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    case 1:
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld16u(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld16u(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    case 3:
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld32u(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld32u(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    case 7:
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld64(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld64(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    }

    potential_page_fault(s);
    vl = tcg_const_i32(l);
    gen_helper_clc(cc_op, cpu_env, vl, s1, s2);
    tcg_temp_free_i32(vl);
    set_cc_static(s);
}

static void disas_e3(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int x2, int b2, int d2)
{
    TCGv_i64 addr, tmp2, tmp3;
    TCGv_i32 tmp32_1;

    LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
              op, r1, x2, b2, d2);
    addr = get_address(s, x2, b2, d2);
    switch (op) {
    case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
        tcg_gen_bswap64_i64(tmp2, tmp2);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_andi_i64(tmp2, tmp2, 0x7fffffffULL);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap16_i32(tmp32_1, tmp32_1);
        store_reg16(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
        tmp32_1 = load_reg32(r1);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        break;
    case 0x73: /* ICY R1,D2(X2,B2) [RXY] */
        tmp3 = tcg_temp_new_i64();
        tcg_gen_qemu_ld8u(tmp3, addr, get_mem_index(s));
        store_reg8(r1, tmp3);
        tcg_temp_free_i64(tmp3);
        break;
    default:
        LOG_DISAS("illegal e3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
    tcg_temp_free_i64(addr);
}

#ifndef CONFIG_USER_ONLY
static void disas_e5(CPUS390XState *env, DisasContext *s, uint64_t insn)
{
    TCGv_i64 tmp, tmp2;
    int op = (insn >> 32) & 0xff;

    tmp = get_address(s, 0, (insn >> 28) & 0xf, (insn >> 16) & 0xfff);
    tmp2 = get_address(s, 0, (insn >> 12) & 0xf, insn & 0xfff);

    LOG_DISAS("disas_e5: insn %" PRIx64 "\n", insn);
    switch (op) {
    case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
        /* Test Protection */
        potential_page_fault(s);
        gen_helper_tprot(cc_op, tmp, tmp2);
        set_cc_static(s);
        break;
    default:
        LOG_DISAS("illegal e5 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(tmp2);
}
#endif

static void disas_eb(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int r3, int b2, int d2)
{
    TCGv_i64 tmp, tmp2, tmp3, tmp4;
    TCGv_i32 tmp32_1, tmp32_2;
    int i, stm_len;

    LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
              op, r1, r3, b2, d2);
    switch (op) {
    case 0xc: /* SRLG R1,R3,D2(B2) [RSY] */
    case 0xd: /* SLLG R1,R3,D2(B2) [RSY] */
    case 0xa: /* SRAG R1,R3,D2(B2) [RSY] */
    case 0xb: /* SLAG R1,R3,D2(B2) [RSY] */
    case 0x1c: /* RLLG R1,R3,D2(B2) [RSY] */
        if (b2) {
            tmp = get_address(s, 0, b2, d2);
            tcg_gen_andi_i64(tmp, tmp, 0x3f);
        } else {
            tmp = tcg_const_i64(d2 & 0x3f);
        }
        switch (op) {
        case 0xc:
            tcg_gen_shr_i64(regs[r1], regs[r3], tmp);
            break;
        case 0xd:
            tcg_gen_shl_i64(regs[r1], regs[r3], tmp);
            break;
        case 0xa:
            tcg_gen_sar_i64(regs[r1], regs[r3], tmp);
            break;
        case 0xb:
            tmp2 = tcg_temp_new_i64();
            tmp3 = tcg_temp_new_i64();
            gen_op_update2_cc_i64(s, CC_OP_SLAG, regs[r3], tmp);
            tcg_gen_shl_i64(tmp2, regs[r3], tmp);
            /* override sign bit with source sign */
            tcg_gen_andi_i64(tmp2, tmp2, ~0x8000000000000000ULL);
            tcg_gen_andi_i64(tmp3, regs[r3], 0x8000000000000000ULL);
            tcg_gen_or_i64(regs[r1], tmp2, tmp3);
            tcg_temp_free_i64(tmp2);
            tcg_temp_free_i64(tmp3);
            break;
        case 0x1c:
            tcg_gen_rotl_i64(regs[r1], regs[r3], tmp);
            break;
        default:
            tcg_abort();
            break;
        }
        if (op == 0xa) {
            set_cc_s64(s, regs[r1]);
        }
        tcg_temp_free_i64(tmp);
        break;
    case 0x1d: /* RLL R1,R3,D2(B2) [RSY] */
        if (b2) {
            tmp = get_address(s, 0, b2, d2);
            tcg_gen_andi_i64(tmp, tmp, 0x3f);
        } else {
            tmp = tcg_const_i64(d2 & 0x3f);
        }
        tmp32_1 = tcg_temp_new_i32();
        tmp32_2 = load_reg32(r3);
        tcg_gen_trunc_i64_i32(tmp32_1, tmp);
        switch (op) {
        case 0x1d:
            tcg_gen_rotl_i32(tmp32_1, tmp32_2, tmp32_1);
            break;
        default:
            tcg_abort();
            break;
        }
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x4: /* LMG R1,R3,D2(B2) [RSE] */
    case 0x24: /* STMG R1,R3,D2(B2) [RSE] */
        stm_len = 8;
        goto do_mh;
    case 0x26: /* STMH R1,R3,D2(B2) [RSE] */
    case 0x96: /* LMH R1,R3,D2(B2) [RSE] */
        stm_len = 4;
    do_mh:
        /* Apparently, unrolling lmg/stmg of any size gains performance -
           even for very long ones... */
        tmp = get_address(s, 0, b2, d2);
        tmp3 = tcg_const_i64(stm_len);
        tmp4 = tcg_const_i64(op == 0x26 ? 32 : 4);
        for (i = r1;; i = (i + 1) % 16) {
            switch (op) {
            case 0x4:
                tcg_gen_qemu_ld64(regs[i], tmp, get_mem_index(s));
                break;
            case 0x96:
                tmp2 = tcg_temp_new_i64();
#if HOST_LONG_BITS == 32
                tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
                tcg_gen_trunc_i64_i32(TCGV_HIGH(regs[i]), tmp2);
#else
                tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
                tcg_gen_shl_i64(tmp2, tmp2, tmp4);
                tcg_gen_ext32u_i64(regs[i], regs[i]);
                tcg_gen_or_i64(regs[i], regs[i], tmp2);
#endif
                tcg_temp_free_i64(tmp2);
                break;
            case 0x24:
                tcg_gen_qemu_st64(regs[i], tmp, get_mem_index(s));
                break;
            case 0x26:
                tmp2 = tcg_temp_new_i64();
                tcg_gen_shr_i64(tmp2, regs[i], tmp4);
                tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
                tcg_temp_free_i64(tmp2);
                break;
            default:
                tcg_abort();
            }
            if (i == r3) {
                break;
            }
            tcg_gen_add_i64(tmp, tmp, tmp3);
        }
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp3);
        tcg_temp_free_i64(tmp4);
        break;
    case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stcmh(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
#ifndef CONFIG_USER_ONLY
    case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
        /* Load Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_lctlg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
        /* Store Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stctg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
#endif
    case 0x30: /* CSG R1,R3,D2(B2) [RSY] */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        /* XXX rewrite in tcg */
        gen_helper_csg(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        /* XXX rewrite in tcg */
        gen_helper_cdsg(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x52: /* MVIY D1(B1),I2 [SIY] */
        tmp = get_address(s, 0, b2, d2); /* SIY -> this is the destination */
        tmp2 = tcg_const_i64((r1 << 4) | r3);
        tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x80: /* ICMH R1,M3,D2(B2) [RSY] */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        /* XXX split CC calculation out */
        gen_helper_icmh(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    default:
        LOG_DISAS("illegal eb operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
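
/*
 * Note for the load/store-multiple unrolling above: the register index
 * steps as (i + 1) % 16, so a range with r1 > r3 wraps around, e.g.
 * LMG r14,r2,0(r15) loads r14, r15, r0, r1 and r2, as the architecture
 * requires.
 */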

static void disas_ed(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int x2, int b2, int d2, int r1b)
{
    TCGv_i32 tmp_r1, tmp32;
    TCGv_i64 addr, tmp;
    addr = get_address(s, x2, b2, d2);
    tmp_r1 = tcg_const_i32(r1);
    switch (op) {
    case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_ldeb(cpu_env, tmp_r1, addr);
        break;
    case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_lxdb(cpu_env, tmp_r1, addr);
        break;
    case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = load_freg32(r1);
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        set_cc_cmp_f32_i64(s, tmp32, tmp);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
        break;
    case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_aeb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);

        tmp32 = load_freg32(r1);
        gen_set_cc_nz_f32(s, tmp32);
        tcg_temp_free_i32(tmp32);
        break;
    case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_seb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);

        tmp32 = load_freg32(r1);
        gen_set_cc_nz_f32(s, tmp32);
        tcg_temp_free_i32(tmp32);
        break;
    case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_deb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
        break;
    case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tceb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tcdb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tcxb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_meeb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
        break;
    case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_cdb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_adb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_sdb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_mdb(cpu_env, tmp_r1, addr);
        break;
    case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_ddb(cpu_env, tmp_r1, addr);
        break;
    case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
        /* for RXF insns, r1 is R3 and r1b is R1 */
        tmp32 = tcg_const_i32(r1b);
        potential_page_fault(s);
        gen_helper_madb(cpu_env, tmp32, addr, tmp_r1);
        tcg_temp_free_i32(tmp32);
        break;
    default:
        LOG_DISAS("illegal ed operation 0x%x\n", op);
        gen_illegal_opcode(s);
        return;
    }
    tcg_temp_free_i32(tmp_r1);
    tcg_temp_free_i64(addr);
}

static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
                     uint32_t insn)
{
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    int r1, r2;
#ifndef CONFIG_USER_ONLY
    int r3, d2, b2;
#endif

    r1 = (insn >> 4) & 0xf;
    r2 = insn & 0xf;

    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);

    switch (op) {
    case 0x22: /* IPM R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_op_calc_cc(s);
        gen_helper_ipm(cpu_env, cc_op, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x41: /* CKSM R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        gen_op_movi_cc(s, 0);
        break;
    case 0x4e: /* SAR R1,R2 [RRE] */
        tmp32_1 = load_reg32(r2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x4f: /* EAR R1,R2 [RRE] */
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x54: /* MVPG R1,R2 [RRE] */
        tmp = load_reg(0);
        tmp2 = load_reg(r1);
        tmp3 = load_reg(r2);
        potential_page_fault(s);
        gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        /* XXX check CCO bit and set CC accordingly */
        gen_op_movi_cc(s, 0);
        break;
    case 0x55: /* MVST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        gen_op_movi_cc(s, 1);
        break;
    case 0x5d: /* CLST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x5e: /* SRST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;

#ifndef CONFIG_USER_ONLY
    case 0x02: /* STIDP D2(B2) [S] */
        /* Store CPU ID */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stidp(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x04: /* SCK D2(B2) [S] */
        /* Set Clock */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sck(cc_op, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x05: /* STCK D2(B2) [S] */
        /* Store Clock */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stck(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x06: /* SCKC D2(B2) [S] */
        /* Set Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x07: /* STCKC D2(B2) [S] */
        /* Store Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x08: /* SPT D2(B2) [S] */
        /* Set CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x09: /* STPT D2(B2) [S] */
        /* Store CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stpt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0a: /* SPKA D2(B2) [S] */
        /* Set PSW Key from Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
        tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
        tcg_gen_or_i64(psw_mask, tmp2, tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0d: /* PTLB [S] */
        /* Purge TLB */
        check_privileged(s);
        gen_helper_ptlb(cpu_env);
        break;
    case 0x10: /* SPX D2(B2) [S] */
        /* Set Prefix Register */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spx(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x11: /* STPX D2(B2) [S] */
        /* Store Prefix */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x12: /* STAP D2(B2) [S] */
        /* Store CPU Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x21: /* IPTE R1,R2 [RRE] */
        /* Invalidate PTE */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r1);
        tmp2 = load_reg(r2);
        gen_helper_ipte(cpu_env, tmp, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x29: /* ISKE R1,R2 [RRE] */
        /* Insert Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r2);
        tmp2 = tcg_temp_new_i64();
        gen_helper_iske(tmp2, cpu_env, tmp);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Reset Reference Bit Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x2b: /* SSKE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_sske(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x34: /* STCH ? */
        /* Store Subchannel */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x78: /* STCKE D2(B2) [S] */
        /* Store Clock Extended */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x79: /* SACF D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->pc = s->next_pc;
        update_psw_addr(s);
        s->is_jmp = DISAS_JUMP;
        break;
    case 0x7d: /* STSI D2(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x9d: /* LFPC D2(B2) [S] */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0xb1: /* STFL D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0xb2: /* LPSWE D2(B2) [S] */
        /* Load PSW Extended */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        break;
    case 0x20: /* SERVC R1,R2 [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        tmp = load_reg(r1);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
#endif
    default:
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}
2040
2041 static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
2042 int r1, int r2)
2043 {
2044 TCGv_i64 tmp;
2045 TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
2046 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
2047 #define FP_HELPER(i) \
2048 tmp32_1 = tcg_const_i32(r1); \
2049 tmp32_2 = tcg_const_i32(r2); \
2050 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
2051 tcg_temp_free_i32(tmp32_1); \
2052 tcg_temp_free_i32(tmp32_2);
2053
2054 #define FP_HELPER_CC(i) \
2055 tmp32_1 = tcg_const_i32(r1); \
2056 tmp32_2 = tcg_const_i32(r2); \
2057 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
2058 set_cc_static(s); \
2059 tcg_temp_free_i32(tmp32_1); \
2060 tcg_temp_free_i32(tmp32_2);
2061
2062 switch (op) {
2063 case 0x0: /* LPEBR R1,R2 [RRE] */
2064 FP_HELPER_CC(lpebr);
2065 break;
2066 case 0x2: /* LTEBR R1,R2 [RRE] */
2067 FP_HELPER_CC(ltebr);
2068 break;
2069 case 0x3: /* LCEBR R1,R2 [RRE] */
2070 FP_HELPER_CC(lcebr);
2071 break;
2072 case 0x4: /* LDEBR R1,R2 [RRE] */
2073 FP_HELPER(ldebr);
2074 break;
2075 case 0x5: /* LXDBR R1,R2 [RRE] */
2076 FP_HELPER(lxdbr);
2077 break;
2078 case 0x9: /* CEBR R1,R2 [RRE] */
2079 FP_HELPER_CC(cebr);
2080 break;
2081 case 0xa: /* AEBR R1,R2 [RRE] */
2082 FP_HELPER_CC(aebr);
2083 break;
2084 case 0xb: /* SEBR R1,R2 [RRE] */
2085 FP_HELPER_CC(sebr);
2086 break;
2087 case 0xd: /* DEBR R1,R2 [RRE] */
2088 FP_HELPER(debr);
2089 break;
2090 case 0x10: /* LPDBR R1,R2 [RRE] */
2091 FP_HELPER_CC(lpdbr);
2092 break;
2093 case 0x12: /* LTDBR R1,R2 [RRE] */
2094 FP_HELPER_CC(ltdbr);
2095 break;
2096 case 0x13: /* LCDBR R1,R2 [RRE] */
2097 FP_HELPER_CC(lcdbr);
2098 break;
2099 case 0x15: /* SQDBR R1,R2 [RRE] */
2100 FP_HELPER(sqdbr);
2101 break;
2102 case 0x17: /* MEEBR R1,R2 [RRE] */
2103 FP_HELPER(meebr);
2104 break;
2105 case 0x19: /* CDBR R1,R2 [RRE] */
2106 FP_HELPER_CC(cdbr);
2107 break;
2108 case 0x1a: /* ADBR R1,R2 [RRE] */
2109 FP_HELPER_CC(adbr);
2110 break;
2111 case 0x1b: /* SDBR R1,R2 [RRE] */
2112 FP_HELPER_CC(sdbr);
2113 break;
2114 case 0x1c: /* MDBR R1,R2 [RRE] */
2115 FP_HELPER(mdbr);
2116 break;
2117 case 0x1d: /* DDBR R1,R2 [RRE] */
2118 FP_HELPER(ddbr);
2119 break;
2120 case 0xe: /* MAEBR R1,R3,R2 [RRF] */
2121 case 0x1e: /* MADBR R1,R3,R2 [RRF] */
2122 case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
2123 /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
2124 tmp32_1 = tcg_const_i32(m3);
2125 tmp32_2 = tcg_const_i32(r2);
2126 tmp32_3 = tcg_const_i32(r1);
2127 switch (op) {
2128 case 0xe:
2129 gen_helper_maebr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
2130 break;
2131 case 0x1e:
2132 gen_helper_madbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
2133 break;
2134 case 0x1f:
2135 gen_helper_msdbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
2136 break;
2137 default:
2138 tcg_abort();
2139 }
2140 tcg_temp_free_i32(tmp32_1);
2141 tcg_temp_free_i32(tmp32_2);
2142 tcg_temp_free_i32(tmp32_3);
2143 break;
2144 case 0x40: /* LPXBR R1,R2 [RRE] */
2145 FP_HELPER_CC(lpxbr);
2146 break;
2147 case 0x42: /* LTXBR R1,R2 [RRE] */
2148 FP_HELPER_CC(ltxbr);
2149 break;
2150 case 0x43: /* LCXBR R1,R2 [RRE] */
2151 FP_HELPER_CC(lcxbr);
2152 break;
2153 case 0x44: /* LEDBR R1,R2 [RRE] */
2154 FP_HELPER(ledbr);
2155 break;
2156 case 0x45: /* LDXBR R1,R2 [RRE] */
2157 FP_HELPER(ldxbr);
2158 break;
2159 case 0x46: /* LEXBR R1,R2 [RRE] */
2160 FP_HELPER(lexbr);
2161 break;
2162 case 0x49: /* CXBR R1,R2 [RRE] */
2163 FP_HELPER_CC(cxbr);
2164 break;
2165 case 0x4a: /* AXBR R1,R2 [RRE] */
2166 FP_HELPER_CC(axbr);
2167 break;
2168 case 0x4b: /* SXBR R1,R2 [RRE] */
2169 FP_HELPER_CC(sxbr);
2170 break;
2171 case 0x4c: /* MXBR R1,R2 [RRE] */
2172 FP_HELPER(mxbr);
2173 break;
2174 case 0x4d: /* DXBR R1,R2 [RRE] */
2175 FP_HELPER(dxbr);
2176 break;
2177 case 0x65: /* LXR R1,R2 [RRE] */
2178 tmp = load_freg(r2);
2179 store_freg(r1, tmp);
2180 tcg_temp_free_i64(tmp);
2181 tmp = load_freg(r2 + 2);
2182 store_freg(r1 + 2, tmp);
2183 tcg_temp_free_i64(tmp);
2184 break;
2185 case 0x74: /* LZER R1 [RRE] */
2186 tmp32_1 = tcg_const_i32(r1);
2187 gen_helper_lzer(cpu_env, tmp32_1);
2188 tcg_temp_free_i32(tmp32_1);
2189 break;
2190 case 0x75: /* LZDR R1 [RRE] */
2191 tmp32_1 = tcg_const_i32(r1);
2192 gen_helper_lzdr(cpu_env, tmp32_1);
2193 tcg_temp_free_i32(tmp32_1);
2194 break;
2195 case 0x76: /* LZXR R1 [RRE] */
2196 tmp32_1 = tcg_const_i32(r1);
2197 gen_helper_lzxr(cpu_env, tmp32_1);
2198 tcg_temp_free_i32(tmp32_1);
2199 break;
2200 case 0x84: /* SFPC R1 [RRE] */
2201 tmp32_1 = load_reg32(r1);
2202 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
2203 tcg_temp_free_i32(tmp32_1);
2204 break;
2205 case 0x8c: /* EFPC R1 [RRE] */
2206 tmp32_1 = tcg_temp_new_i32();
2207 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
2208 store_reg32(r1, tmp32_1);
2209 tcg_temp_free_i32(tmp32_1);
2210 break;
2211 case 0x94: /* CEFBR R1,R2 [RRE] */
2212 case 0x95: /* CDFBR R1,R2 [RRE] */
2213 case 0x96: /* CXFBR R1,R2 [RRE] */
2214 tmp32_1 = tcg_const_i32(r1);
2215 tmp32_2 = load_reg32(r2);
2216 switch (op) {
2217 case 0x94:
2218 gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
2219 break;
2220 case 0x95:
2221 gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
2222 break;
2223 case 0x96:
2224 gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
2225 break;
2226 default:
2227 tcg_abort();
2228 }
2229 tcg_temp_free_i32(tmp32_1);
2230 tcg_temp_free_i32(tmp32_2);
2231 break;
2232 case 0x98: /* CFEBR R1,R2 [RRE] */
2233 case 0x99: /* CFDBR R1,R2 [RRE] */
2234 case 0x9a: /* CFXBR R1,R2 [RRE] */
2235 tmp32_1 = tcg_const_i32(r1);
2236 tmp32_2 = tcg_const_i32(r2);
2237 tmp32_3 = tcg_const_i32(m3);
2238 switch (op) {
2239 case 0x98:
2240 gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2241 break;
2242 case 0x99:
2243 gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2244 break;
2245 case 0x9a:
2246 gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2247 break;
2248 default:
2249 tcg_abort();
2250 }
2251 set_cc_static(s);
2252 tcg_temp_free_i32(tmp32_1);
2253 tcg_temp_free_i32(tmp32_2);
2254 tcg_temp_free_i32(tmp32_3);
2255 break;
2256 case 0xa4: /* CEGBR R1,R2 [RRE] */
2257 case 0xa5: /* CDGBR R1,R2 [RRE] */
2258 tmp32_1 = tcg_const_i32(r1);
2259 tmp = load_reg(r2);
2260 switch (op) {
2261 case 0xa4:
2262 gen_helper_cegbr(cpu_env, tmp32_1, tmp);
2263 break;
2264 case 0xa5:
2265 gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
2266 break;
2267 default:
2268 tcg_abort();
2269 }
2270 tcg_temp_free_i32(tmp32_1);
2271 tcg_temp_free_i64(tmp);
2272 break;
2273 case 0xa6: /* CXGBR R1,R2 [RRE] */
2274 tmp32_1 = tcg_const_i32(r1);
2275 tmp = load_reg(r2);
2276 gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
2277 tcg_temp_free_i32(tmp32_1);
2278 tcg_temp_free_i64(tmp);
2279 break;
2280 case 0xa8: /* CGEBR R1,R2 [RRE] */
2281 tmp32_1 = tcg_const_i32(r1);
2282 tmp32_2 = tcg_const_i32(r2);
2283 tmp32_3 = tcg_const_i32(m3);
2284 gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2285 set_cc_static(s);
2286 tcg_temp_free_i32(tmp32_1);
2287 tcg_temp_free_i32(tmp32_2);
2288 tcg_temp_free_i32(tmp32_3);
2289 break;
2290 case 0xa9: /* CGDBR R1,R2 [RRE] */
2291 tmp32_1 = tcg_const_i32(r1);
2292 tmp32_2 = tcg_const_i32(r2);
2293 tmp32_3 = tcg_const_i32(m3);
2294 gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2295 set_cc_static(s);
2296 tcg_temp_free_i32(tmp32_1);
2297 tcg_temp_free_i32(tmp32_2);
2298 tcg_temp_free_i32(tmp32_3);
2299 break;
2300 case 0xaa: /* CGXBR R1,R2 [RRE] */
2301 tmp32_1 = tcg_const_i32(r1);
2302 tmp32_2 = tcg_const_i32(r2);
2303 tmp32_3 = tcg_const_i32(m3);
2304 gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2305 set_cc_static(s);
2306 tcg_temp_free_i32(tmp32_1);
2307 tcg_temp_free_i32(tmp32_2);
2308 tcg_temp_free_i32(tmp32_3);
2309 break;
2310 default:
2311 LOG_DISAS("illegal b3 operation 0x%x\n", op);
2312 gen_illegal_opcode(s);
2313 break;
2314 }
2315
2316 #undef FP_HELPER_CC
2317 #undef FP_HELPER
2318 }
2319
2320 static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
2321 int r2)
2322 {
2323 TCGv_i64 tmp;
2324 TCGv_i32 tmp32_1;
2325
2326 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
2327 switch (op) {
2328 case 0x17: /* LLGTR R1,R2 [RRE] */
2329 tmp32_1 = load_reg32(r2);
2330 tmp = tcg_temp_new_i64();
2331 tcg_gen_andi_i32(tmp32_1, tmp32_1, 0x7fffffffUL);
2332 tcg_gen_extu_i32_i64(tmp, tmp32_1);
2333 store_reg(r1, tmp);
2334 tcg_temp_free_i32(tmp32_1);
2335 tcg_temp_free_i64(tmp);
2336 break;
2337 case 0x0f: /* LRVGR R1,R2 [RRE] */
2338 tcg_gen_bswap64_i64(regs[r1], regs[r2]);
2339 break;
2340 case 0x1f: /* LRVR R1,R2 [RRE] */
2341 tmp32_1 = load_reg32(r2);
2342 tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
2343 store_reg32(r1, tmp32_1);
2344 tcg_temp_free_i32(tmp32_1);
2345 break;
2346 case 0x83: /* FLOGR R1,R2 [RRE] */
2347 tmp = load_reg(r2);
2348 tmp32_1 = tcg_const_i32(r1);
2349 gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
2350 set_cc_static(s);
2351 tcg_temp_free_i64(tmp);
2352 tcg_temp_free_i32(tmp32_1);
2353 break;
2354 default:
2355 LOG_DISAS("illegal b9 operation 0x%x\n", op);
2356 gen_illegal_opcode(s);
2357 break;
2358 }
2359 }
2360
2361 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
2362 {
2363 TCGv_i64 tmp, tmp2, tmp3, tmp4;
2364 TCGv_i32 tmp32_1, tmp32_2, tmp32_3, tmp32_4;
2365 unsigned char opc;
2366 uint64_t insn;
2367 int op, r1, r2, r3, d1, d2, x2, b1, b2, i, i2, r1b;
2368 TCGv_i32 vl;
2369
2370 opc = cpu_ldub_code(env, s->pc);
2371 LOG_DISAS("opc 0x%x\n", opc);
2372
2373 switch (opc) {
2374 case 0xa: /* SVC I [RR] */
2375 insn = ld_code2(env, s->pc);
2376 debug_insn(insn);
2377 i = insn & 0xff;
2378 update_psw_addr(s);
2379 gen_op_calc_cc(s);
2380 tmp32_1 = tcg_const_i32(i);
2381 tmp32_2 = tcg_const_i32(s->next_pc - s->pc);
2382 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, int_svc_code));
2383 tcg_gen_st_i32(tmp32_2, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
2384 gen_exception(EXCP_SVC);
2385 s->is_jmp = DISAS_EXCP;
2386 tcg_temp_free_i32(tmp32_1);
2387 tcg_temp_free_i32(tmp32_2);
2388 break;
2389 case 0xe: /* MVCL R1,R2 [RR] */
2390 insn = ld_code2(env, s->pc);
2391 decode_rr(s, insn, &r1, &r2);
2392 tmp32_1 = tcg_const_i32(r1);
2393 tmp32_2 = tcg_const_i32(r2);
2394 potential_page_fault(s);
2395 gen_helper_mvcl(cc_op, cpu_env, tmp32_1, tmp32_2);
2396 set_cc_static(s);
2397 tcg_temp_free_i32(tmp32_1);
2398 tcg_temp_free_i32(tmp32_2);
2399 break;
2400 case 0x28: /* LDR R1,R2 [RR] */
2401 insn = ld_code2(env, s->pc);
2402 decode_rr(s, insn, &r1, &r2);
2403 tmp = load_freg(r2);
2404 store_freg(r1, tmp);
2405 tcg_temp_free_i64(tmp);
2406 break;
2407 case 0x38: /* LER R1,R2 [RR] */
2408 insn = ld_code2(env, s->pc);
2409 decode_rr(s, insn, &r1, &r2);
2410 tmp32_1 = load_freg32(r2);
2411 store_freg32(r1, tmp32_1);
2412 tcg_temp_free_i32(tmp32_1);
2413 break;
2414 case 0x43: /* IC R1,D2(X2,B2) [RX] */
2415 insn = ld_code4(env, s->pc);
2416 tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
2417 tmp2 = tcg_temp_new_i64();
2418 tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
2419 store_reg8(r1, tmp2);
2420 tcg_temp_free_i64(tmp);
2421 tcg_temp_free_i64(tmp2);
2422 break;
2423 case 0x44: /* EX R1,D2(X2,B2) [RX] */
2424 insn = ld_code4(env, s->pc);
2425 tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
2426 tmp2 = load_reg(r1);
2427 tmp3 = tcg_const_i64(s->pc + 4);
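/* EX is 4 bytes long, so s->pc + 4 is the address of the following
   insn, which the helper needs as the resume address. */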
2428 update_psw_addr(s);
2429 gen_op_calc_cc(s);
2430 gen_helper_ex(cc_op, cpu_env, cc_op, tmp2, tmp, tmp3);
2431 set_cc_static(s);
2432 tcg_temp_free_i64(tmp);
2433 tcg_temp_free_i64(tmp2);
2434 tcg_temp_free_i64(tmp3);
2435 break;
2436 case 0x4e: /* CVD R1,D2(X2,B2) [RX] */
2437 insn = ld_code4(env, s->pc);
2438 tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
2439 tmp2 = tcg_temp_new_i64();
2440 tmp32_1 = tcg_temp_new_i32();
2441 tcg_gen_trunc_i64_i32(tmp32_1, regs[r1]);
2442 gen_helper_cvd(tmp2, tmp32_1);
2443 tcg_gen_qemu_st64(tmp2, tmp, get_mem_index(s));
2444 tcg_temp_free_i64(tmp);
2445 tcg_temp_free_i64(tmp2);
2446 tcg_temp_free_i32(tmp32_1);
2447 break;
2448 case 0x60: /* STD R1,D2(X2,B2) [RX] */
2449 insn = ld_code4(env, s->pc);
2450 tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
2451 tmp2 = load_freg(r1);
2452 tcg_gen_qemu_st64(tmp2, tmp, get_mem_index(s));
2453 tcg_temp_free_i64(tmp);
2454 tcg_temp_free_i64(tmp2);
2455 break;
2456 case 0x68: /* LD R1,D2(X2,B2) [RX] */
2457 insn = ld_code4(env, s->pc);
2458 tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
2459 tmp2 = tcg_temp_new_i64();
2460 tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
2461 store_freg(r1, tmp2);
2462 tcg_temp_free_i64(tmp);
2463 tcg_temp_free_i64(tmp2);
2464 break;
2465 case 0x70: /* STE R1,D2(X2,B2) [RX] */
2466 insn = ld_code4(env, s->pc);
2467 tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
2468 tmp2 = tcg_temp_new_i64();
2469 tmp32_1 = load_freg32(r1);
2470 tcg_gen_extu_i32_i64(tmp2, tmp32_1);
2471 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
2472 tcg_temp_free_i64(tmp);
2473 tcg_temp_free_i64(tmp2);
2474 tcg_temp_free_i32(tmp32_1);
2475 break;
2476 case 0x78: /* LE R1,D2(X2,B2) [RX] */
2477 insn = ld_code4(env, s->pc);
2478 tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
2479 tmp2 = tcg_temp_new_i64();
2480 tmp32_1 = tcg_temp_new_i32();
2481 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
2482 tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
2483 store_freg32(r1, tmp32_1);
2484 tcg_temp_free_i64(tmp);
2485 tcg_temp_free_i64(tmp2);
2486 tcg_temp_free_i32(tmp32_1);
2487 break;
2488 #ifndef CONFIG_USER_ONLY
2489 case 0x80: /* SSM D2(B2) [S] */
2490 /* Set System Mask */
2491 check_privileged(s);
2492 insn = ld_code4(env, s->pc);
2493 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2494 tmp = get_address(s, 0, b2, d2);
2495 tmp2 = tcg_temp_new_i64();
2496 tmp3 = tcg_temp_new_i64();
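/* SSM replaces the system mask, i.e. the top byte of the PSW mask,
   with the byte at the operand address. */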
2497 tcg_gen_andi_i64(tmp3, psw_mask, ~0xff00000000000000ULL);
2498 tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
2499 tcg_gen_shli_i64(tmp2, tmp2, 56);
2500 tcg_gen_or_i64(psw_mask, tmp3, tmp2);
2501 tcg_temp_free_i64(tmp);
2502 tcg_temp_free_i64(tmp2);
2503 tcg_temp_free_i64(tmp3);
2504 break;
2505 case 0x82: /* LPSW D2(B2) [S] */
2506 /* Load PSW */
2507 check_privileged(s);
2508 insn = ld_code4(env, s->pc);
2509 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2510 tmp = get_address(s, 0, b2, d2);
2511 tmp2 = tcg_temp_new_i64();
2512 tmp3 = tcg_temp_new_i64();
2513 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
2514 tcg_gen_addi_i64(tmp, tmp, 4);
2515 tcg_gen_qemu_ld32u(tmp3, tmp, get_mem_index(s));
2516 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2517 tcg_gen_shli_i64(tmp2, tmp2, 32);
2518 gen_helper_load_psw(cpu_env, tmp2, tmp3);
2519 tcg_temp_free_i64(tmp);
2520 tcg_temp_free_i64(tmp2);
2521 tcg_temp_free_i64(tmp3);
2522 /* we need to keep cc_op intact */
2523 s->is_jmp = DISAS_JUMP;
2524 break;
2525 case 0x83: /* DIAG R1,R3,D2 [RS] */
2526 /* Diagnose call (KVM hypercall) */
2527 check_privileged(s);
2528 potential_page_fault(s);
2529 insn = ld_code4(env, s->pc);
2530 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2531 tmp32_1 = tcg_const_i32(insn & 0xfff);
2532 tmp2 = load_reg(2);
2533 tmp3 = load_reg(1);
2534 gen_helper_diag(tmp2, cpu_env, tmp32_1, tmp2, tmp3);
2535 store_reg(2, tmp2);
2536 tcg_temp_free_i32(tmp32_1);
2537 tcg_temp_free_i64(tmp2);
2538 tcg_temp_free_i64(tmp3);
2539 break;
2540 #endif
2541 case 0x88: /* SRL R1,D2(B2) [RS] */
2542 case 0x89: /* SLL R1,D2(B2) [RS] */
2543 case 0x8a: /* SRA R1,D2(B2) [RS] */
2544 insn = ld_code4(env, s->pc);
2545 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2546 tmp = get_address(s, 0, b2, d2);
2547 tmp32_1 = load_reg32(r1);
2548 tmp32_2 = tcg_temp_new_i32();
2549 tcg_gen_trunc_i64_i32(tmp32_2, tmp);
2550 tcg_gen_andi_i32(tmp32_2, tmp32_2, 0x3f);
2551 switch (opc) {
2552 case 0x88:
2553 tcg_gen_shr_i32(tmp32_1, tmp32_1, tmp32_2);
2554 break;
2555 case 0x89:
2556 tcg_gen_shl_i32(tmp32_1, tmp32_1, tmp32_2);
2557 break;
2558 case 0x8a:
2559 tcg_gen_sar_i32(tmp32_1, tmp32_1, tmp32_2);
2560 set_cc_s32(s, tmp32_1);
2561 break;
2562 default:
2563 tcg_abort();
2564 }
2565 store_reg32(r1, tmp32_1);
2566 tcg_temp_free_i64(tmp);
2567 tcg_temp_free_i32(tmp32_1);
2568 tcg_temp_free_i32(tmp32_2);
2569 break;
2570 case 0x8c: /* SRDL R1,D2(B2) [RS] */
2571 case 0x8d: /* SLDL R1,D2(B2) [RS] */
2572 case 0x8e: /* SRDA R1,D2(B2) [RS] */
2573 insn = ld_code4(env, s->pc);
2574 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2575 tmp = get_address(s, 0, b2, d2); /* shift */
2576 tmp2 = tcg_temp_new_i64();
2577 tmp32_1 = load_reg32(r1);
2578 tmp32_2 = load_reg32(r1 + 1);
2579 tcg_gen_concat_i32_i64(tmp2, tmp32_2, tmp32_1); /* operand */
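/* The even/odd pair r1:r1+1 forms the 64-bit operand, with r1
   supplying the high word. */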
2580 switch (opc) {
2581 case 0x8c:
2582 tcg_gen_shr_i64(tmp2, tmp2, tmp);
2583 break;
2584 case 0x8d:
2585 tcg_gen_shl_i64(tmp2, tmp2, tmp);
2586 break;
2587 case 0x8e:
2588 tcg_gen_sar_i64(tmp2, tmp2, tmp);
2589 set_cc_s64(s, tmp2);
2590 break;
2591 }
2592 tcg_gen_shri_i64(tmp, tmp2, 32);
2593 tcg_gen_trunc_i64_i32(tmp32_1, tmp);
2594 store_reg32(r1, tmp32_1);
2595 tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
2596 store_reg32(r1 + 1, tmp32_2);
2597 tcg_temp_free_i64(tmp);
2598 tcg_temp_free_i64(tmp2);
2599 break;
2600 case 0x98: /* LM R1,R3,D2(B2) [RS] */
2601 case 0x90: /* STM R1,R3,D2(B2) [RS] */
2602 insn = ld_code4(env, s->pc);
2603 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2604
2605 tmp = get_address(s, 0, b2, d2);
2606 tmp2 = tcg_temp_new_i64();
2607 tmp3 = tcg_const_i64(4);
2608 tmp4 = tcg_const_i64(0xffffffff00000000ULL);
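/* The register range wraps modulo 16 from r1 to r3.  LM loads only
   the low 32 bits of each register, so tmp4 preserves the high
   halves. */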
2609 for (i = r1;; i = (i + 1) % 16) {
2610 if (opc == 0x98) {
2611 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
2612 tcg_gen_and_i64(regs[i], regs[i], tmp4);
2613 tcg_gen_or_i64(regs[i], regs[i], tmp2);
2614 } else {
2615 tcg_gen_qemu_st32(regs[i], tmp, get_mem_index(s));
2616 }
2617 if (i == r3) {
2618 break;
2619 }
2620 tcg_gen_add_i64(tmp, tmp, tmp3);
2621 }
2622 tcg_temp_free_i64(tmp);
2623 tcg_temp_free_i64(tmp2);
2624 tcg_temp_free_i64(tmp3);
2625 tcg_temp_free_i64(tmp4);
2626 break;
2627 case 0x92: /* MVI D1(B1),I2 [SI] */
2628 insn = ld_code4(env, s->pc);
2629 tmp = decode_si(s, insn, &i2, &b1, &d1);
2630 tmp2 = tcg_const_i64(i2);
2631 tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
2632 tcg_temp_free_i64(tmp);
2633 tcg_temp_free_i64(tmp2);
2634 break;
2635 case 0x94: /* NI D1(B1),I2 [SI] */
2636 case 0x96: /* OI D1(B1),I2 [SI] */
2637 case 0x97: /* XI D1(B1),I2 [SI] */
2638 insn = ld_code4(env, s->pc);
2639 tmp = decode_si(s, insn, &i2, &b1, &d1);
2640 tmp2 = tcg_temp_new_i64();
2641 tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
2642 switch (opc) {
2643 case 0x94:
2644 tcg_gen_andi_i64(tmp2, tmp2, i2);
2645 break;
2646 case 0x96:
2647 tcg_gen_ori_i64(tmp2, tmp2, i2);
2648 break;
2649 case 0x97:
2650 tcg_gen_xori_i64(tmp2, tmp2, i2);
2651 break;
2652 default:
2653 tcg_abort();
2654 }
2655 tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
2656 set_cc_nz_u64(s, tmp2);
2657 tcg_temp_free_i64(tmp);
2658 tcg_temp_free_i64(tmp2);
2659 break;
2660 case 0x9a: /* LAM R1,R3,D2(B2) [RS] */
2661 insn = ld_code4(env, s->pc);
2662 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2663 tmp = get_address(s, 0, b2, d2);
2664 tmp32_1 = tcg_const_i32(r1);
2665 tmp32_2 = tcg_const_i32(r3);
2666 potential_page_fault(s);
2667 gen_helper_lam(cpu_env, tmp32_1, tmp, tmp32_2);
2668 tcg_temp_free_i64(tmp);
2669 tcg_temp_free_i32(tmp32_1);
2670 tcg_temp_free_i32(tmp32_2);
2671 break;
2672 case 0x9b: /* STAM R1,R3,D2(B2) [RS] */
2673 insn = ld_code4(env, s->pc);
2674 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2675 tmp = get_address(s, 0, b2, d2);
2676 tmp32_1 = tcg_const_i32(r1);
2677 tmp32_2 = tcg_const_i32(r3);
2678 potential_page_fault(s);
2679 gen_helper_stam(cpu_env, tmp32_1, tmp, tmp32_2);
2680 tcg_temp_free_i64(tmp);
2681 tcg_temp_free_i32(tmp32_1);
2682 tcg_temp_free_i32(tmp32_2);
2683 break;
2684 case 0xa8: /* MVCLE R1,R3,D2(B2) [RS] */
2685 insn = ld_code4(env, s->pc);
2686 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2687 tmp = get_address(s, 0, b2, d2);
2688 tmp32_1 = tcg_const_i32(r1);
2689 tmp32_2 = tcg_const_i32(r3);
2690 potential_page_fault(s);
2691 gen_helper_mvcle(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
2692 set_cc_static(s);
2693 tcg_temp_free_i64(tmp);
2694 tcg_temp_free_i32(tmp32_1);
2695 tcg_temp_free_i32(tmp32_2);
2696 break;
2697 case 0xa9: /* CLCLE R1,R3,D2(B2) [RS] */
2698 insn = ld_code4(env, s->pc);
2699 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2700 tmp = get_address(s, 0, b2, d2);
2701 tmp32_1 = tcg_const_i32(r1);
2702 tmp32_2 = tcg_const_i32(r3);
2703 potential_page_fault(s);
2704 gen_helper_clcle(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
2705 set_cc_static(s);
2706 tcg_temp_free_i64(tmp);
2707 tcg_temp_free_i32(tmp32_1);
2708 tcg_temp_free_i32(tmp32_2);
2709 break;
2710 #ifndef CONFIG_USER_ONLY
2711 case 0xac: /* STNSM D1(B1),I2 [SI] */
2712 case 0xad: /* STOSM D1(B1),I2 [SI] */
2713 check_privileged(s);
2714 insn = ld_code4(env, s->pc);
2715 tmp = decode_si(s, insn, &i2, &b1, &d1);
2716 tmp2 = tcg_temp_new_i64();
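/* Store the current system mask (the top byte of the PSW mask), then
   AND (STNSM) or OR (STOSM) the immediate into it. */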
2717 tcg_gen_shri_i64(tmp2, psw_mask, 56);
2718 tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
2719 if (opc == 0xac) {
2720 tcg_gen_andi_i64(psw_mask, psw_mask,
2721 ((uint64_t)i2 << 56) | 0x00ffffffffffffffULL);
2722 } else {
2723 tcg_gen_ori_i64(psw_mask, psw_mask, (uint64_t)i2 << 56);
2724 }
2725 tcg_temp_free_i64(tmp);
2726 tcg_temp_free_i64(tmp2);
2727 break;
2728 case 0xae: /* SIGP R1,R3,D2(B2) [RS] */
2729 check_privileged(s);
2730 insn = ld_code4(env, s->pc);
2731 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2732 tmp = get_address(s, 0, b2, d2);
2733 tmp2 = load_reg(r3);
2734 tmp32_1 = tcg_const_i32(r1);
2735 potential_page_fault(s);
2736 gen_helper_sigp(cc_op, cpu_env, tmp, tmp32_1, tmp2);
2737 set_cc_static(s);
2738 tcg_temp_free_i64(tmp);
2739 tcg_temp_free_i64(tmp2);
2740 tcg_temp_free_i32(tmp32_1);
2741 break;
2742 case 0xb1: /* LRA R1,D2(X2,B2) [RX] */
2743 check_privileged(s);
2744 insn = ld_code4(env, s->pc);
2745 tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
2746 tmp32_1 = tcg_const_i32(r1);
2747 potential_page_fault(s);
2748 gen_helper_lra(cc_op, cpu_env, tmp, tmp32_1);
2749 set_cc_static(s);
2750 tcg_temp_free_i64(tmp);
2751 tcg_temp_free_i32(tmp32_1);
2752 break;
2753 #endif
2754 case 0xb2:
2755 insn = ld_code4(env, s->pc);
2756 op = (insn >> 16) & 0xff;
2757 switch (op) {
2758 case 0x9c: /* STFPC D2(B2) [S] */
2759 d2 = insn & 0xfff;
2760 b2 = (insn >> 12) & 0xf;
2761 tmp32_1 = tcg_temp_new_i32();
2762 tmp = tcg_temp_new_i64();
2763 tmp2 = get_address(s, 0, b2, d2);
2764 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
2765 tcg_gen_extu_i32_i64(tmp, tmp32_1);
2766 tcg_gen_qemu_st32(tmp, tmp2, get_mem_index(s));
2767 tcg_temp_free_i32(tmp32_1);
2768 tcg_temp_free_i64(tmp);
2769 tcg_temp_free_i64(tmp2);
2770 break;
2771 default:
2772 disas_b2(env, s, op, insn);
2773 break;
2774 }
2775 break;
2776 case 0xb3:
2777 insn = ld_code4(env, s->pc);
2778 op = (insn >> 16) & 0xff;
2779 r3 = (insn >> 12) & 0xf; /* aka m3 */
2780 r1 = (insn >> 4) & 0xf;
2781 r2 = insn & 0xf;
2782 disas_b3(env, s, op, r3, r1, r2);
2783 break;
2784 #ifndef CONFIG_USER_ONLY
2785 case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */
2786 /* Store Control */
2787 check_privileged(s);
2788 insn = ld_code4(env, s->pc);
2789 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2790 tmp = get_address(s, 0, b2, d2);
2791 tmp32_1 = tcg_const_i32(r1);
2792 tmp32_2 = tcg_const_i32(r3);
2793 potential_page_fault(s);
2794 gen_helper_stctl(cpu_env, tmp32_1, tmp, tmp32_2);
2795 tcg_temp_free_i64(tmp);
2796 tcg_temp_free_i32(tmp32_1);
2797 tcg_temp_free_i32(tmp32_2);
2798 break;
2799 case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */
2800 /* Load Control */
2801 check_privileged(s);
2802 insn = ld_code4(env, s->pc);
2803 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2804 tmp = get_address(s, 0, b2, d2);
2805 tmp32_1 = tcg_const_i32(r1);
2806 tmp32_2 = tcg_const_i32(r3);
2807 potential_page_fault(s);
2808 gen_helper_lctl(cpu_env, tmp32_1, tmp, tmp32_2);
2809 tcg_temp_free_i64(tmp);
2810 tcg_temp_free_i32(tmp32_1);
2811 tcg_temp_free_i32(tmp32_2);
2812 break;
2813 #endif
2814 case 0xb9:
2815 insn = ld_code4(env, s->pc);
2816 r1 = (insn >> 4) & 0xf;
2817 r2 = insn & 0xf;
2818 op = (insn >> 16) & 0xff;
2819 disas_b9(env, s, op, r1, r2);
2820 break;
2821 case 0xba: /* CS R1,R3,D2(B2) [RS] */
2822 insn = ld_code4(env, s->pc);
2823 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2824 tmp = get_address(s, 0, b2, d2);
2825 tmp32_1 = tcg_const_i32(r1);
2826 tmp32_2 = tcg_const_i32(r3);
2827 potential_page_fault(s);
2828 gen_helper_cs(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
2829 set_cc_static(s);
2830 tcg_temp_free_i64(tmp);
2831 tcg_temp_free_i32(tmp32_1);
2832 tcg_temp_free_i32(tmp32_2);
2833 break;
2834 case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
2835 insn = ld_code4(env, s->pc);
2836 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2837 tmp = get_address(s, 0, b2, d2);
2838 tmp32_1 = load_reg32(r1);
2839 tmp32_2 = tcg_const_i32(r3);
2840 potential_page_fault(s);
2841 gen_helper_clm(cc_op, cpu_env, tmp32_1, tmp32_2, tmp);
2842 set_cc_static(s);
2843 tcg_temp_free_i64(tmp);
2844 tcg_temp_free_i32(tmp32_1);
2845 tcg_temp_free_i32(tmp32_2);
2846 break;
2847 case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
2848 insn = ld_code4(env, s->pc);
2849 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2850 tmp = get_address(s, 0, b2, d2);
2851 tmp32_1 = load_reg32(r1);
2852 tmp32_2 = tcg_const_i32(r3);
2853 potential_page_fault(s);
2854 gen_helper_stcm(cpu_env, tmp32_1, tmp32_2, tmp);
2855 tcg_temp_free_i64(tmp);
2856 tcg_temp_free_i32(tmp32_1);
2857 tcg_temp_free_i32(tmp32_2);
2858 break;
2859 case 0xbf: /* ICM R1,M3,D2(B2) [RS] */
2860 insn = ld_code4(env, s->pc);
2861 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2862 if (r3 == 15) {
2863 /* effectively a 32-bit load */
2864 tmp = get_address(s, 0, b2, d2);
2865 tmp32_1 = tcg_temp_new_i32();
2866 tmp32_2 = tcg_const_i32(r3);
2867 tcg_gen_qemu_ld32u(tmp, tmp, get_mem_index(s));
2868 store_reg32_i64(r1, tmp);
2869 tcg_gen_trunc_i64_i32(tmp32_1, tmp);
2870 set_cc_icm(s, tmp32_2, tmp32_1);
2871 tcg_temp_free_i64(tmp);
2872 tcg_temp_free_i32(tmp32_1);
2873 tcg_temp_free_i32(tmp32_2);
2874 } else if (r3) {
2875 uint32_t mask = 0x00ffffffUL;
2876 uint32_t shift = 24;
2877 int m3 = r3;
2878 tmp = get_address(s, 0, b2, d2);
2879 tmp2 = tcg_temp_new_i64();
2880 tmp32_1 = load_reg32(r1);
2881 tmp32_2 = tcg_temp_new_i32();
2882 tmp32_3 = tcg_const_i32(r3);
2883 tmp32_4 = tcg_const_i32(0);
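/* Walk the mask MSB first: each mask bit selects one byte of r1, and
   each set bit consumes one byte from memory.  tmp32_4 accumulates
   the inserted bits for the cc computation. */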
2884 while (m3) {
2885 if (m3 & 8) {
2886 tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
2887 tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
2888 if (shift) {
2889 tcg_gen_shli_i32(tmp32_2, tmp32_2, shift);
2890 }
2891 tcg_gen_andi_i32(tmp32_1, tmp32_1, mask);
2892 tcg_gen_or_i32(tmp32_1, tmp32_1, tmp32_2);
2893 tcg_gen_or_i32(tmp32_4, tmp32_4, tmp32_2);
2894 tcg_gen_addi_i64(tmp, tmp, 1);
2895 }
2896 m3 = (m3 << 1) & 0xf;
2897 mask = (mask >> 8) | 0xff000000UL;
2898 shift -= 8;
2899 }
2900 store_reg32(r1, tmp32_1);
2901 set_cc_icm(s, tmp32_3, tmp32_4);
2902 tcg_temp_free_i64(tmp);
2903 tcg_temp_free_i64(tmp2);
2904 tcg_temp_free_i32(tmp32_1);
2905 tcg_temp_free_i32(tmp32_2);
2906 tcg_temp_free_i32(tmp32_3);
2907 tcg_temp_free_i32(tmp32_4);
2908 } else {
2909 /* i.e. env->cc = 0 */
2910 gen_op_movi_cc(s, 0);
2911 }
2912 break;
2913 case 0xd2: /* MVC D1(L,B1),D2(B2) [SS] */
2914 case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */
2915 case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */
2916 case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */
2917 case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */
2918 case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */
2919 case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */
2920 insn = ld_code6(env, s->pc);
2921 vl = tcg_const_i32((insn >> 32) & 0xff);
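/* The SS-format L field is architecturally one less than the operand
   length in bytes. */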
2922 b1 = (insn >> 28) & 0xf;
2923 b2 = (insn >> 12) & 0xf;
2924 d1 = (insn >> 16) & 0xfff;
2925 d2 = insn & 0xfff;
2926 tmp = get_address(s, 0, b1, d1);
2927 tmp2 = get_address(s, 0, b2, d2);
2928 switch (opc) {
2929 case 0xd2:
2930 gen_op_mvc(s, (insn >> 32) & 0xff, tmp, tmp2);
2931 break;
2932 case 0xd4:
2933 potential_page_fault(s);
2934 gen_helper_nc(cc_op, cpu_env, vl, tmp, tmp2);
2935 set_cc_static(s);
2936 break;
2937 case 0xd5:
2938 gen_op_clc(s, (insn >> 32) & 0xff, tmp, tmp2);
2939 break;
2940 case 0xd6:
2941 potential_page_fault(s);
2942 gen_helper_oc(cc_op, cpu_env, vl, tmp, tmp2);
2943 set_cc_static(s);
2944 break;
2945 case 0xd7:
2946 potential_page_fault(s);
2947 gen_helper_xc(cc_op, cpu_env, vl, tmp, tmp2);
2948 set_cc_static(s);
2949 break;
2950 case 0xdc:
2951 potential_page_fault(s);
2952 gen_helper_tr(cpu_env, vl, tmp, tmp2);
2953 set_cc_static(s);
2954 break;
2955 case 0xf3:
2956 potential_page_fault(s);
2957 gen_helper_unpk(cpu_env, vl, tmp, tmp2);
2958 break;
2959 default:
2960 tcg_abort();
2961 }
2962 tcg_temp_free_i64(tmp);
2963 tcg_temp_free_i64(tmp2);
2964 break;
2965 #ifndef CONFIG_USER_ONLY
2966 case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */
2967 case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */
2968 check_privileged(s);
2969 potential_page_fault(s);
2970 insn = ld_code6(env, s->pc);
2971 r1 = (insn >> 36) & 0xf;
2972 r3 = (insn >> 32) & 0xf;
2973 b1 = (insn >> 28) & 0xf;
2974 d1 = (insn >> 16) & 0xfff;
2975 b2 = (insn >> 12) & 0xf;
2976 d2 = insn & 0xfff;
2977 tmp = load_reg(r1);
2978 /* XXX key in r3 */
2979 tmp2 = get_address(s, 0, b1, d1);
2980 tmp3 = get_address(s, 0, b2, d2);
2981 if (opc == 0xda) {
2982 gen_helper_mvcp(cc_op, cpu_env, tmp, tmp2, tmp3);
2983 } else {
2984 gen_helper_mvcs(cc_op, cpu_env, tmp, tmp2, tmp3);
2985 }
2986 set_cc_static(s);
2987 tcg_temp_free_i64(tmp);
2988 tcg_temp_free_i64(tmp2);
2989 tcg_temp_free_i64(tmp3);
2990 break;
2991 #endif
2992 case 0xe3:
2993 insn = ld_code6(env, s->pc);
2994 debug_insn(insn);
2995 op = insn & 0xff;
2996 r1 = (insn >> 36) & 0xf;
2997 x2 = (insn >> 32) & 0xf;
2998 b2 = (insn >> 28) & 0xf;
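/* The 20-bit displacement is stored split as DL (12 bits) and
   DH (8 bits); assemble DH:DL, then sign-extend from 20 bits via
   the shift pair. */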
2999 d2 = ((int)((((insn >> 16) & 0xfff)
3000 | ((insn << 4) & 0xff000)) << 12)) >> 12;
3001 disas_e3(env, s, op, r1, x2, b2, d2);
3002 break;
3003 #ifndef CONFIG_USER_ONLY
3004 case 0xe5:
3005 /* Test Protection */
3006 check_privileged(s);
3007 insn = ld_code6(env, s->pc);
3008 debug_insn(insn);
3009 disas_e5(env, s, insn);
3010 break;
3011 #endif
3012 case 0xeb:
3013 insn = ld_code6(env, s->pc);
3014 debug_insn(insn);
3015 op = insn & 0xff;
3016 r1 = (insn >> 36) & 0xf;
3017 r3 = (insn >> 32) & 0xf;
3018 b2 = (insn >> 28) & 0xf;
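/* Same 20-bit signed displacement assembly as in the 0xe3 case. */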
3019 d2 = ((int)((((insn >> 16) & 0xfff)
3020 | ((insn << 4) & 0xff000)) << 12)) >> 12;
3021 disas_eb(env, s, op, r1, r3, b2, d2);
3022 break;
3023 case 0xed:
3024 insn = ld_code6(env, s->pc);
3025 debug_insn(insn);
3026 op = insn & 0xff;
3027 r1 = (insn >> 36) & 0xf;
3028 x2 = (insn >> 32) & 0xf;
3029 b2 = (insn >> 28) & 0xf;
3030 d2 = (short)((insn >> 16) & 0xfff);
3031 r1b = (insn >> 12) & 0xf;
3032 disas_ed(env, s, op, r1, x2, b2, d2, r1b);
3033 break;
3034 default:
3035 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
3036 gen_illegal_opcode(s);
3037 break;
3038 }
3039 }
3040
3041 /* ====================================================================== */
3042 /* Define the insn format enumeration. */
3043 #define F0(N) FMT_##N,
3044 #define F1(N, X1) F0(N)
3045 #define F2(N, X1, X2) F0(N)
3046 #define F3(N, X1, X2, X3) F0(N)
3047 #define F4(N, X1, X2, X3, X4) F0(N)
3048 #define F5(N, X1, X2, X3, X4, X5) F0(N)
3049
3050 typedef enum {
3051 #include "insn-format.def"
3052 } DisasFormat;
3053
3054 #undef F0
3055 #undef F1
3056 #undef F2
3057 #undef F3
3058 #undef F4
3059 #undef F5
3060
3061 /* Define a structure to hold the decoded fields.  We'll store each field
3062    inside an array indexed by an enum.  In order to conserve memory, we'll
3063    arrange for fields that do not exist at the same time to overlap, thus
3064    the "C" for compact.  For checking purposes there is also an "O"
3065    (original) index, which is applied to the availability bitmaps. */
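/* For example, no format carries both r1 and m1, so they can share
   compact slot 0 below; presentO records which original fields are
   actually valid for the decoded format. */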
3066
3067 enum DisasFieldIndexO {
3068 FLD_O_r1,
3069 FLD_O_r2,
3070 FLD_O_r3,
3071 FLD_O_m1,
3072 FLD_O_m3,
3073 FLD_O_m4,
3074 FLD_O_b1,
3075 FLD_O_b2,
3076 FLD_O_b4,
3077 FLD_O_d1,
3078 FLD_O_d2,
3079 FLD_O_d4,
3080 FLD_O_x2,
3081 FLD_O_l1,
3082 FLD_O_l2,
3083 FLD_O_i1,
3084 FLD_O_i2,
3085 FLD_O_i3,
3086 FLD_O_i4,
3087 FLD_O_i5
3088 };
3089
3090 enum DisasFieldIndexC {
3091 FLD_C_r1 = 0,
3092 FLD_C_m1 = 0,
3093 FLD_C_b1 = 0,
3094 FLD_C_i1 = 0,
3095
3096 FLD_C_r2 = 1,
3097 FLD_C_b2 = 1,
3098 FLD_C_i2 = 1,
3099
3100 FLD_C_r3 = 2,
3101 FLD_C_m3 = 2,
3102 FLD_C_i3 = 2,
3103
3104 FLD_C_m4 = 3,
3105 FLD_C_b4 = 3,
3106 FLD_C_i4 = 3,
3107 FLD_C_l1 = 3,
3108
3109 FLD_C_i5 = 4,
3110 FLD_C_d1 = 4,
3111
3112 FLD_C_d2 = 5,
3113
3114 FLD_C_d4 = 6,
3115 FLD_C_x2 = 6,
3116 FLD_C_l2 = 6,
3117
3118 NUM_C_FIELD = 7
3119 };
3120
3121 struct DisasFields {
3122 unsigned op:8;
3123 unsigned op2:8;
3124 unsigned presentC:16;
3125 unsigned int presentO;
3126 int c[NUM_C_FIELD];
3127 };
3128
3129 /* This is the way fields are to be accessed out of DisasFields. */
3130 #define have_field(S, F) have_field1((S), FLD_O_##F)
3131 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
3132
3133 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
3134 {
3135 return (f->presentO >> c) & 1;
3136 }
3137
3138 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
3139 enum DisasFieldIndexC c)
3140 {
3141 assert(have_field1(f, o));
3142 return f->c[c];
3143 }
3144
3145 /* Describe the layout of each field in each format. */
3146 typedef struct DisasField {
3147 unsigned int beg:8;
3148 unsigned int size:8;
3149 unsigned int type:2;
3150 unsigned int indexC:6;
3151 enum DisasFieldIndexO indexO:8;
3152 } DisasField;
3153
3154 typedef struct DisasFormatInfo {
3155 DisasField op[NUM_C_FIELD];
3156 } DisasFormatInfo;
3157
3158 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
3159 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
3160 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3161 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
3162 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3163 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
3164 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
3165 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3166 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
3167 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3168 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
3169 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
3170 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
3171 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
3172
3173 #define F0(N) { { } },
3174 #define F1(N, X1) { { X1 } },
3175 #define F2(N, X1, X2) { { X1, X2 } },
3176 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
3177 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
3178 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
3179
3180 static const DisasFormatInfo format_info[] = {
3181 #include "insn-format.def"
3182 };
3183
3184 #undef F0
3185 #undef F1
3186 #undef F2
3187 #undef F3
3188 #undef F4
3189 #undef F5
3190 #undef R
3191 #undef M
3192 #undef BD
3193 #undef BXD
3194 #undef BDL
3195 #undef BXDL
3196 #undef I
3197 #undef L
3198
3199 /* Generally, we'll extract operands into these structures, operate upon
3200 them, and store them back. See the "in1", "in2", "prep", "wout" sets
3201 of routines below for more details. */
3202 typedef struct {
3203 bool g_out, g_out2, g_in1, g_in2;
3204 TCGv_i64 out, out2, in1, in2;
3205 TCGv_i64 addr1;
3206 } DisasOps;
3207
3208 /* Return values from translate_one, indicating the state of the TB. */
3209 typedef enum {
3210 /* Continue the TB. */
3211 NO_EXIT,
3212 /* We have emitted one or more goto_tb. No fixup required. */
3213 EXIT_GOTO_TB,
3214 /* We are not using a goto_tb (for whatever reason), but have updated
3215 the PC (for whatever reason), so there's no need to do it again on
3216 exiting the TB. */
3217 EXIT_PC_UPDATED,
3218 /* We are exiting the TB, but have neither emitted a goto_tb, nor
3219 updated the PC for the next instruction to be executed. */
3220 EXIT_PC_STALE,
3221 /* We are ending the TB with a noreturn function call, e.g. longjmp.
3222 No following code will be executed. */
3223 EXIT_NORETURN,
3224 } ExitStatus;
3225
3226 typedef enum DisasFacility {
3227 FAC_Z, /* zarch (default) */
3228 FAC_CASS, /* compare and swap and store */
3229 FAC_CASS2, /* compare and swap and store 2 */
3230 FAC_DFP, /* decimal floating point */
3231 FAC_DFPR, /* decimal floating point rounding */
3232 FAC_DO, /* distinct operands */
3233 FAC_EE, /* execute extensions */
3234 FAC_EI, /* extended immediate */
3235 FAC_FPE, /* floating point extension */
3236 FAC_FPSSH, /* floating point support sign handling */
3237 FAC_FPRGR, /* FPR-GR transfer */
3238 FAC_GIE, /* general instructions extension */
3239 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
3240 FAC_HW, /* high-word */
3241 FAC_IEEEE_SIM, /* IEEE exception simulation */
3242 FAC_LOC, /* load/store on condition */
3243 FAC_LD, /* long displacement */
3244 FAC_PC, /* population count */
3245 FAC_SCF, /* store clock fast */
3246 FAC_SFLE, /* store facility list extended */
3247 } DisasFacility;
3248
3249 struct DisasInsn {
3250 unsigned opc:16;
3251 DisasFormat fmt:6;
3252 DisasFacility fac:6;
3253
3254 const char *name;
3255
3256 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
3257 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
3258 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
3259 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
3260 void (*help_cout)(DisasContext *, DisasOps *);
3261 ExitStatus (*help_op)(DisasContext *, DisasOps *);
3262
3263 uint64_t data;
3264 };
3265
3266 /* ====================================================================== */
3267 /* Miscellaneous helpers, used by several operations. */
3268
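/* Branch directly to DEST: fall through if DEST is simply the next
   insn, chain the TBs with goto_tb when use_goto_tb allows it, and
   otherwise just update psw_addr and exit to the main loop. */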
3269 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
3270 {
3271 if (dest == s->next_pc) {
3272 return NO_EXIT;
3273 }
3274 if (use_goto_tb(s, dest)) {
3275 gen_update_cc_op(s);
3276 tcg_gen_goto_tb(0);
3277 tcg_gen_movi_i64(psw_addr, dest);
3278 tcg_gen_exit_tb((tcg_target_long)s->tb);
3279 return EXIT_GOTO_TB;
3280 } else {
3281 tcg_gen_movi_i64(psw_addr, dest);
3282 return EXIT_PC_UPDATED;
3283 }
3284 }
3285
3286 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
3287 bool is_imm, int imm, TCGv_i64 cdest)
3288 {
3289 ExitStatus ret;
3290 uint64_t dest = s->pc + 2 * imm;
3291 int lab;
3292
3293 /* Take care of the special cases first. */
3294 if (c->cond == TCG_COND_NEVER) {
3295 ret = NO_EXIT;
3296 goto egress;
3297 }
3298 if (is_imm) {
3299 if (dest == s->next_pc) {
3300 /* Branch to next. */
3301 ret = NO_EXIT;
3302 goto egress;
3303 }
3304 if (c->cond == TCG_COND_ALWAYS) {
3305 ret = help_goto_direct(s, dest);
3306 goto egress;
3307 }
3308 } else {
3309 if (TCGV_IS_UNUSED_I64(cdest)) {
3310 /* E.g. bcr %r0 -> no branch. */
3311 ret = NO_EXIT;
3312 goto egress;
3313 }
3314 if (c->cond == TCG_COND_ALWAYS) {
3315 tcg_gen_mov_i64(psw_addr, cdest);
3316 ret = EXIT_PC_UPDATED;
3317 goto egress;
3318 }
3319 }
3320
3321 if (use_goto_tb(s, s->next_pc)) {
3322 if (is_imm && use_goto_tb(s, dest)) {
3323 /* Both exits can use goto_tb. */
3324 gen_update_cc_op(s);
3325
3326 lab = gen_new_label();
3327 if (c->is_64) {
3328 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
3329 } else {
3330 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
3331 }
3332
3333 /* Branch not taken. */
3334 tcg_gen_goto_tb(0);
3335 tcg_gen_movi_i64(psw_addr, s->next_pc);
3336 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
3337
3338 /* Branch taken. */
3339 gen_set_label(lab);
3340 tcg_gen_goto_tb(1);
3341 tcg_gen_movi_i64(psw_addr, dest);
3342 tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
3343
3344 ret = EXIT_GOTO_TB;
3345 } else {
3346 /* Fallthru can use goto_tb, but the taken branch cannot. */
3347 /* Store taken branch destination before the brcond. This
3348 avoids having to allocate a new local temp to hold it.
3349 We'll overwrite this in the not taken case anyway. */
3350 if (!is_imm) {
3351 tcg_gen_mov_i64(psw_addr, cdest);
3352 }
3353
3354 lab = gen_new_label();
3355 if (c->is_64) {
3356 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
3357 } else {
3358 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
3359 }
3360
3361 /* Branch not taken. */
3362 gen_update_cc_op(s);
3363 tcg_gen_goto_tb(0);
3364 tcg_gen_movi_i64(psw_addr, s->next_pc);
3365 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
3366
3367 gen_set_label(lab);
3368 if (is_imm) {
3369 tcg_gen_movi_i64(psw_addr, dest);
3370 }
3371 ret = EXIT_PC_UPDATED;
3372 }
3373 } else {
3374 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
3375 Most commonly we're single-stepping or some other condition that
3376 disables all use of goto_tb. Just update the PC and exit. */
3377
3378 TCGv_i64 next = tcg_const_i64(s->next_pc);
3379 if (is_imm) {
3380 cdest = tcg_const_i64(dest);
3381 }
3382
3383 if (c->is_64) {
3384 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
3385 cdest, next);
3386 } else {
3387 TCGv_i32 t0 = tcg_temp_new_i32();
3388 TCGv_i64 t1 = tcg_temp_new_i64();
3389 TCGv_i64 z = tcg_const_i64(0);
3390 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
3391 tcg_gen_extu_i32_i64(t1, t0);
3392 tcg_temp_free_i32(t0);
3393 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
3394 tcg_temp_free_i64(t1);
3395 tcg_temp_free_i64(z);
3396 }
3397
3398 if (is_imm) {
3399 tcg_temp_free_i64(cdest);
3400 }
3401 tcg_temp_free_i64(next);
3402
3403 ret = EXIT_PC_UPDATED;
3404 }
3405
3406 egress:
3407 free_compare(c);
3408 return ret;
3409 }
3410
3411 /* ====================================================================== */
3412 /* The operations. These perform the bulk of the work for any insn,
3413 usually after the operands have been loaded and output initialized. */
3414
3415 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
3416 {
3417 gen_helper_abs_i64(o->out, o->in2);
3418 return NO_EXIT;
3419 }
3420
3421 static ExitStatus op_add(DisasContext *s, DisasOps *o)
3422 {
3423 tcg_gen_add_i64(o->out, o->in1, o->in2);
3424 return NO_EXIT;
3425 }
3426
3427 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
3428 {
3429 TCGv_i64 cc;
3430
3431 tcg_gen_add_i64(o->out, o->in1, o->in2);
3432
3433 /* XXX possible optimization point */
3434 gen_op_calc_cc(s);
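/* For the unsigned-add cc ops, the cc is 2 or 3 exactly when a carry
   occurred, so the carry-in is cc >> 1. */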
3435 cc = tcg_temp_new_i64();
3436 tcg_gen_extu_i32_i64(cc, cc_op);
3437 tcg_gen_shri_i64(cc, cc, 1);
3438
3439 tcg_gen_add_i64(o->out, o->out, cc);
3440 tcg_temp_free_i64(cc);
3441 return NO_EXIT;
3442 }
3443
3444 static ExitStatus op_and(DisasContext *s, DisasOps *o)
3445 {
3446 tcg_gen_and_i64(o->out, o->in1, o->in2);
3447 return NO_EXIT;
3448 }
3449
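/* AND a shifted immediate into one field of a register (the NIxx-style
   immediate forms); insn->data packs the field as (size << 8) | shift,
   and the OR with ~mask lets the bits outside the field pass through
   unchanged. */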
3450 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
3451 {
3452 int shift = s->insn->data & 0xff;
3453 int size = s->insn->data >> 8;
3454 uint64_t mask = ((1ull << size) - 1) << shift;
3455
3456 assert(!o->g_in2);
3457 tcg_gen_shli_i64(o->in2, o->in2, shift);
3458 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3459 tcg_gen_and_i64(o->out, o->in1, o->in2);
3460
3461 /* Produce the CC from only the bits manipulated. */
3462 tcg_gen_andi_i64(cc_dst, o->out, mask);
3463 set_cc_nz_u64(s, cc_dst);
3464 return NO_EXIT;
3465 }
3466
3467 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
3468 {
3469 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
3470 if (!TCGV_IS_UNUSED_I64(o->in2)) {
3471 tcg_gen_mov_i64(psw_addr, o->in2);
3472 return EXIT_PC_UPDATED;
3473 } else {
3474 return NO_EXIT;
3475 }
3476 }
3477
3478 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
3479 {
3480 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
3481 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
3482 }
3483
3484 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
3485 {
3486 int m1 = get_field(s->fields, m1);
3487 bool is_imm = have_field(s->fields, i2);
3488 int imm = is_imm ? get_field(s->fields, i2) : 0;
3489 DisasCompare c;
3490
3491 disas_jcc(s, &c, m1);
3492 return help_branch(s, &c, is_imm, imm, o->in2);
3493 }
3494
3495 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
3496 {
3497 int r1 = get_field(s->fields, r1);
3498 bool is_imm = have_field(s->fields, i2);
3499 int imm = is_imm ? get_field(s->fields, i2) : 0;
3500 DisasCompare c;
3501 TCGv_i64 t;
3502
3503 c.cond = TCG_COND_NE;
3504 c.is_64 = false;
3505 c.g1 = false;
3506 c.g2 = false;
3507
3508 t = tcg_temp_new_i64();
3509 tcg_gen_subi_i64(t, regs[r1], 1);
3510 store_reg32_i64(r1, t);
3511 c.u.s32.a = tcg_temp_new_i32();
3512 c.u.s32.b = tcg_const_i32(0);
3513 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
3514 tcg_temp_free_i64(t);
3515
3516 return help_branch(s, &c, is_imm, imm, o->in2);
3517 }
3518
3519 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
3520 {
3521 int r1 = get_field(s->fields, r1);
3522 bool is_imm = have_field(s->fields, i2);
3523 int imm = is_imm ? get_field(s->fields, i2) : 0;
3524 DisasCompare c;
3525
3526 c.cond = TCG_COND_NE;
3527 c.is_64 = true;
3528 c.g1 = true;
3529 c.g2 = false;
3530
3531 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
3532 c.u.s64.a = regs[r1];
3533 c.u.s64.b = tcg_const_i64(0);
3534
3535 return help_branch(s, &c, is_imm, imm, o->in2);
3536 }
3537
3538 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
3539 {
3540 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
3541 return_low128(o->out);
3542 return NO_EXIT;
3543 }
3544
3545 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
3546 {
3547 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
3548 return_low128(o->out);
3549 return NO_EXIT;
3550 }
3551
3552 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
3553 {
3554 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
3555 return_low128(o->out);
3556 return NO_EXIT;
3557 }
3558
3559 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
3560 {
3561 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
3562 return_low128(o->out);
3563 return NO_EXIT;
3564 }
3565
3566 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
3567 {
3568 int shift = s->insn->data & 0xff;
3569 int size = s->insn->data >> 8;
3570 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
3571 return NO_EXIT;
3572 }
3573
3574 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
3575 {
3576 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
3577 return NO_EXIT;
3578 }
3579
3580 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
3581 {
3582 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
3583 return NO_EXIT;
3584 }
3585
3586 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
3587 {
3588 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
3589 return NO_EXIT;
3590 }
3591
3592 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
3593 {
3594 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
3595 return NO_EXIT;
3596 }
3597
3598 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
3599 {
3600 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
3601 return NO_EXIT;
3602 }
3603
3604 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
3605 {
3606 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
3607 return NO_EXIT;
3608 }
3609
3610 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
3611 {
3612 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
3613 return NO_EXIT;
3614 }
3615
3616 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
3617 {
3618 o->out = o->in2;
3619 o->g_out = o->g_in2;
3620 TCGV_UNUSED_I64(o->in2);
3621 o->g_in2 = false;
3622 return NO_EXIT;
3623 }
3624
3625 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3626 {
3627 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3628 return NO_EXIT;
3629 }
3630
3631 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3632 {
3633 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
3634 return_low128(o->out2);
3635 return NO_EXIT;
3636 }
3637
3638 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3639 {
3640 gen_helper_nabs_i64(o->out, o->in2);
3641 return NO_EXIT;
3642 }
3643
3644 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3645 {
3646 tcg_gen_neg_i64(o->out, o->in2);
3647 return NO_EXIT;
3648 }
3649
3650 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3651 {
3652 tcg_gen_or_i64(o->out, o->in1, o->in2);
3653 return NO_EXIT;
3654 }
3655
3656 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3657 {
3658 int shift = s->insn->data & 0xff;
3659 int size = s->insn->data >> 8;
3660 uint64_t mask = ((1ull << size) - 1) << shift;
3661
3662 assert(!o->g_in2);
3663 tcg_gen_shli_i64(o->in2, o->in2, shift);
3664 tcg_gen_or_i64(o->out, o->in1, o->in2);
3665
3666 /* Produce the CC from only the bits manipulated. */
3667 tcg_gen_andi_i64(cc_dst, o->out, mask);
3668 set_cc_nz_u64(s, cc_dst);
3669 return NO_EXIT;
3670 }
3671
3672 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3673 {
3674 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3675 return NO_EXIT;
3676 }
3677
3678 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3679 {
3680 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3681 return NO_EXIT;
3682 }
3683
3684 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3685 {
3686 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3687 return NO_EXIT;
3688 }
3689
3690 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3691 {
3692 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3693 return NO_EXIT;
3694 }
3695
3696 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3697 {
3698 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3699 return NO_EXIT;
3700 }
3701
3702 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3703 {
3704 TCGv_i64 cc;
3705
3706 assert(!o->g_in2);
3707 tcg_gen_not_i64(o->in2, o->in2);
3708 tcg_gen_add_i64(o->out, o->in1, o->in2);
3709
3710 /* XXX possible optimization point */
3711 gen_op_calc_cc(s);
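/* For the unsigned-subtract cc ops, bit 1 of the cc is the "no
   borrow" indicator, which is exactly the carry-in needed for
   x + ~y + carry. */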
3712 cc = tcg_temp_new_i64();
3713 tcg_gen_extu_i32_i64(cc, cc_op);
3714 tcg_gen_shri_i64(cc, cc, 1);
3715 tcg_gen_add_i64(o->out, o->out, cc);
3716 tcg_temp_free_i64(cc);
3717 return NO_EXIT;
3718 }
3719
3720 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3721 {
3722 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3723 return NO_EXIT;
3724 }
3725
3726 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3727 {
3728 int shift = s->insn->data & 0xff;
3729 int size = s->insn->data >> 8;
3730 uint64_t mask = ((1ull << size) - 1) << shift;
3731
3732 assert(!o->g_in2);
3733 tcg_gen_shli_i64(o->in2, o->in2, shift);
3734 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3735
3736 /* Produce the CC from only the bits manipulated. */
3737 tcg_gen_andi_i64(cc_dst, o->out, mask);
3738 set_cc_nz_u64(s, cc_dst);
3739 return NO_EXIT;
3740 }
3741
3742 /* ====================================================================== */
3743 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3744 the original inputs), update the various cc data structures in order to
3745 be able to compute the new condition code. */
3746
3747 static void cout_abs32(DisasContext *s, DisasOps *o)
3748 {
3749 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3750 }
3751
3752 static void cout_abs64(DisasContext *s, DisasOps *o)
3753 {
3754 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3755 }
3756
3757 static void cout_adds32(DisasContext *s, DisasOps *o)
3758 {
3759 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3760 }
3761
3762 static void cout_adds64(DisasContext *s, DisasOps *o)
3763 {
3764 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3765 }
3766
3767 static void cout_addu32(DisasContext *s, DisasOps *o)
3768 {
3769 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3770 }
3771
3772 static void cout_addu64(DisasContext *s, DisasOps *o)
3773 {
3774 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3775 }
3776
3777 static void cout_addc32(DisasContext *s, DisasOps *o)
3778 {
3779 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3780 }
3781
3782 static void cout_addc64(DisasContext *s, DisasOps *o)
3783 {
3784 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3785 }
3786
3787 static void cout_cmps32(DisasContext *s, DisasOps *o)
3788 {
3789 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3790 }
3791
3792 static void cout_cmps64(DisasContext *s, DisasOps *o)
3793 {
3794 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3795 }
3796
3797 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3798 {
3799 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3800 }
3801
3802 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3803 {
3804 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3805 }
3806
3807 static void cout_nabs32(DisasContext *s, DisasOps *o)
3808 {
3809 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3810 }
3811
3812 static void cout_nabs64(DisasContext *s, DisasOps *o)
3813 {
3814 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3815 }
3816
3817 static void cout_neg32(DisasContext *s, DisasOps *o)
3818 {
3819 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3820 }
3821
3822 static void cout_neg64(DisasContext *s, DisasOps *o)
3823 {
3824 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
3825 }
3826
3827 static void cout_nz32(DisasContext *s, DisasOps *o)
3828 {
3829 tcg_gen_ext32u_i64(cc_dst, o->out);
3830 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3831 }
3832
3833 static void cout_nz64(DisasContext *s, DisasOps *o)
3834 {
3835 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3836 }
3837
3838 static void cout_s32(DisasContext *s, DisasOps *o)
3839 {
3840 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3841 }
3842
3843 static void cout_s64(DisasContext *s, DisasOps *o)
3844 {
3845 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3846 }
3847
3848 static void cout_subs32(DisasContext *s, DisasOps *o)
3849 {
3850 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3851 }
3852
3853 static void cout_subs64(DisasContext *s, DisasOps *o)
3854 {
3855 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3856 }
3857
3858 static void cout_subu32(DisasContext *s, DisasOps *o)
3859 {
3860 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3861 }
3862
3863 static void cout_subu64(DisasContext *s, DisasOps *o)
3864 {
3865 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3866 }
3867
3868 static void cout_subb32(DisasContext *s, DisasOps *o)
3869 {
3870 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3871 }
3872
3873 static void cout_subb64(DisasContext *s, DisasOps *o)
3874 {
3875 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3876 }
3877
3878 static void cout_tm32(DisasContext *s, DisasOps *o)
3879 {
3880 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3881 }
3882
3883 static void cout_tm64(DisasContext *s, DisasOps *o)
3884 {
3885 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3886 }
3887
3888 /* ====================================================================== */
3889 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3890 with the TCG register to which we will write. Used in combination with
3891 the "wout" generators, in some cases we need a new temporary, and in
3892 some cases we can write to a TCG global. */
3893
3894 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3895 {
3896 o->out = tcg_temp_new_i64();
3897 }
3898
3899 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3900 {
3901 o->out = tcg_temp_new_i64();
3902 o->out2 = tcg_temp_new_i64();
3903 }
3904
3905 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3906 {
3907 o->out = regs[get_field(f, r1)];
3908 o->g_out = true;
3909 }
3910
3911 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3912 {
3913 /* ??? Specification exception: r1 must be even. */
3914 int r1 = get_field(f, r1);
3915 o->out = regs[r1];
3916 o->out2 = regs[(r1 + 1) & 15];
3917 o->g_out = o->g_out2 = true;
3918 }
3919
3920 /* ====================================================================== */
3921 /* The "Write OUTput" generators. These generally perform some non-trivial
3922 copy of data to TCG globals, or to main memory. The trivial cases are
3923 generally handled by having a "prep" generator install the TCG global
3924 as the destination of the operation. */
3925
3926 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3927 {
3928 store_reg(get_field(f, r1), o->out);
3929 }
3930
3931 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3932 {
3933 store_reg32_i64(get_field(f, r1), o->out);
3934 }
3935
3936 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3937 {
3938 /* ??? Specification exception: r1 must be even. */
3939 int r1 = get_field(f, r1);
3940 store_reg32_i64(r1, o->out);
3941 store_reg32_i64((r1 + 1) & 15, o->out2);
3942 }
3943
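/* Store a 64-bit OUT value into the even/odd register pair r1/r1+1:
   the low 32 bits go to r1+1 and the high 32 bits to r1.  Note that
   o->out is shifted, and thus clobbered, in the process. */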
3944 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3945 {
3946 /* ??? Specification exception: r1 must be even. */
3947 int r1 = get_field(f, r1);
3948 store_reg32_i64((r1 + 1) & 15, o->out);
3949 tcg_gen_shri_i64(o->out, o->out, 32);
3950 store_reg32_i64(r1, o->out);
3951 }
3952
3953 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3954 {
3955 if (get_field(f, r1) != get_field(f, r2)) {
3956 store_reg32_i64(get_field(f, r1), o->out);
3957 }
3958 }
3959
3960 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3961 {
3962 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3963 }
3964
3965 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3966 {
3967 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3968 }
3969
3970 /* ====================================================================== */
3971 /* The "INput 1" generators. These load the first operand to an insn. */
3972
3973 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3974 {
3975 o->in1 = load_reg(get_field(f, r1));
3976 }
3977
3978 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3979 {
3980 o->in1 = regs[get_field(f, r1)];
3981 o->g_in1 = true;
3982 }
3983
3984 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3985 {
3986 /* ??? Specification exception: r1 must be even. */
3987 int r1 = get_field(f, r1);
3988 o->in1 = load_reg((r1 + 1) & 15);
3989 }
3990
3991 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3992 {
3993 /* ??? Specification exception: r1 must be even. */
3994 int r1 = get_field(f, r1);
3995 o->in1 = tcg_temp_new_i64();
3996 tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
3997 }
3998
3999 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4000 {
4001 /* ??? Specification exception: r1 must be even. */
4002 int r1 = get_field(f, r1);
4003 o->in1 = tcg_temp_new_i64();
4004 tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
4005 }
4006
4007 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4008 {
4009 /* ??? Specification exception: r1 must be even. */
4010 int r1 = get_field(f, r1);
4011 o->in1 = tcg_temp_new_i64();
4012 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4013 }
4014
4015 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4016 {
4017 o->in1 = load_reg(get_field(f, r2));
4018 }
4019
4020 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4021 {
4022 o->in1 = load_reg(get_field(f, r3));
4023 }
4024
4025 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4026 {
4027 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4028 }
4029
4030 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4031 {
4032 in1_la1(s, f, o);
4033 o->in1 = tcg_temp_new_i64();
4034 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4035 }
4036
4037 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4038 {
4039 in1_la1(s, f, o);
4040 o->in1 = tcg_temp_new_i64();
4041 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4042 }
4043
4044 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4045 {
4046 in1_la1(s, f, o);
4047 o->in1 = tcg_temp_new_i64();
4048 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4049 }
4050
4051 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4052 {
4053 in1_la1(s, f, o);
4054 o->in1 = tcg_temp_new_i64();
4055 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4056 }
4057
4058 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4059 {
4060 in1_la1(s, f, o);
4061 o->in1 = tcg_temp_new_i64();
4062 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4063 }
4064
4065 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4066 {
4067 in1_la1(s, f, o);
4068 o->in1 = tcg_temp_new_i64();
4069 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4070 }
4071
4072 /* ====================================================================== */
4073 /* The "INput 2" generators. These load the second operand to an insn. */
4074
4075 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4076 {
4077 o->in2 = load_reg(get_field(f, r2));
4078 }
4079
4080 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4081 {
4082 o->in2 = regs[get_field(f, r2)];
4083 o->g_in2 = true;
4084 }
4085
4086 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4087 {
4088 int r2 = get_field(f, r2);
4089 if (r2 != 0) {
4090 o->in2 = load_reg(r2);
4091 }
4092 }
4093
4094 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4095 {
4096 o->in2 = tcg_temp_new_i64();
4097 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4098 }
4099
4100 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4101 {
4102 o->in2 = tcg_temp_new_i64();
4103 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4104 }
4105
4106 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4107 {
4108 o->in2 = tcg_temp_new_i64();
4109 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4110 }
4111
4112 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4113 {
4114 o->in2 = tcg_temp_new_i64();
4115 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4116 }
4117
4118 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4119 {
4120 o->in2 = load_reg(get_field(f, r3));
4121 }
4122
4123 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4124 {
4125 o->in2 = tcg_temp_new_i64();
4126 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4127 }
4128
4129 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4130 {
4131 o->in2 = tcg_temp_new_i64();
4132 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4133 }
4134
4135 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4136 {
4137 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4138 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4139 }
4140
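/* RI2 is a signed halfword count relative to the current insn: the
   effective address is s->pc + 2 * I2 (used by LARL and the
   relative-long memory accesses below). */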
4141 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4142 {
4143 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4144 }
4145
4146 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4147 {
4148 in2_a2(s, f, o);
4149 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4150 }
4151
4152 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4153 {
4154 in2_a2(s, f, o);
4155 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4156 }
4157
4158 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4159 {
4160 in2_a2(s, f, o);
4161 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4162 }
4163
4164 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4165 {
4166 in2_a2(s, f, o);
4167 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4168 }
4169
4170 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4171 {
4172 in2_ri2(s, f, o);
4173 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4174 }
4175
4176 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4177 {
4178 in2_ri2(s, f, o);
4179 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4180 }
4181
4182 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4183 {
4184 in2_ri2(s, f, o);
4185 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4186 }
4187
4188 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4189 {
4190 in2_ri2(s, f, o);
4191 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4192 }
4193
4194 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4195 {
4196 o->in2 = tcg_const_i64(get_field(f, i2));
4197 }
4198
4199 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4200 {
4201 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4202 }
4203
4204 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4205 {
4206 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4207 }
4208
4209 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4210 {
4211 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4212 }
4213
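/* The _shl variants take the shift amount from the insn table's data
   field; e.g. the OR-immediate insns OILL/OILH/OIHL/OIHH use shifts of
   0, 16, 32 and 48 to place a 16-bit immediate in the right halfword. */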
4214 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4215 {
4216 uint64_t i2 = (uint16_t)get_field(f, i2);
4217 o->in2 = tcg_const_i64(i2 << s->insn->data);
4218 }
4219
4220 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4221 {
4222 uint64_t i2 = (uint32_t)get_field(f, i2);
4223 o->in2 = tcg_const_i64(i2 << s->insn->data);
4224 }
4225
4226 /* ====================================================================== */
4227
4228 /* Find opc within the table of insns. This is formulated as a switch
4229 statement so that (1) we get compile-time notice of cut-and-paste errors
4230 for duplicated opcodes, and (2) the compiler generates the binary
4231 search tree, rather than us having to post-process the table. */
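
/* A minimal sketch of the pattern, with hypothetical names for
 * illustration only.  Given a file "ops.def" containing:
 *
 *     OP(0x1a00, foo)
 *     OP(0x1b00, bar)
 *
 * the same file is included twice under different definitions of OP:
 *
 *     #define OP(OPC, NM) insn_##NM,
 *     enum OpIndex {
 *     #include "ops.def"
 *     };
 *     #undef OP
 *
 *     #define OP(OPC, NM) case OPC: return &table[insn_##NM];
 *     switch (opc) {
 *     #include "ops.def"
 *     }
 *
 * A duplicated opcode then shows up as a duplicate case label, which is
 * a compile-time error, and the compiler is free to lower the switch
 * into a binary search tree or jump table. */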
4232
4233 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4234 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4235
4236 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4237
4238 enum DisasInsnEnum {
4239 #include "insn-data.def"
4240 };
4241
4242 #undef D
4243 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4244 .opc = OPC, \
4245 .fmt = FMT_##FT, \
4246 .fac = FAC_##FC, \
4247 .name = #NM, \
4248 .help_in1 = in1_##I1, \
4249 .help_in2 = in2_##I2, \
4250 .help_prep = prep_##P, \
4251 .help_wout = wout_##W, \
4252 .help_cout = cout_##CC, \
4253 .help_op = op_##OP, \
4254 .data = D \
4255 },
4256
4257 /* Allow 0 to be used for NULL in the table below. */
4258 #define in1_0 NULL
4259 #define in2_0 NULL
4260 #define prep_0 NULL
4261 #define wout_0 NULL
4262 #define cout_0 NULL
4263 #define op_0 NULL
4264
4265 static const DisasInsn insn_info[] = {
4266 #include "insn-data.def"
4267 };
4268
4269 #undef D
4270 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4271 case OPC: return &insn_info[insn_ ## NM];
4272
4273 static const DisasInsn *lookup_opc(uint16_t opc)
4274 {
4275 switch (opc) {
4276 #include "insn-data.def"
4277 default:
4278 return NULL;
4279 }
4280 }
4281
4282 #undef D
4283 #undef C
4284
4285 /* Extract a field from the insn. The INSN should be left-aligned in
4286 the uint64_t so that we can more easily utilize the big-bit-endian
4287 definitions we extract from the Principles of Operation. */
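
/* Worked example (illustrative): in the RR format, R1 occupies bits
   8-11 of the insn.  With the insn left-aligned, that field is simply
   (insn << 8) >> 60 -- the book's bit index serves directly as the
   shift count, with no per-length adjustment. */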
4288
4289 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4290 {
4291 uint32_t r, m;
4292
4293 if (f->size == 0) {
4294 return;
4295 }
4296
4297 /* Zero-extract the field from the insn. */
4298 r = (insn << f->beg) >> (64 - f->size);
4299
4300 /* Sign-extend, or un-swap the field as necessary. */
4301 switch (f->type) {
4302 case 0: /* unsigned */
4303 break;
4304 case 1: /* signed */
4305 assert(f->size <= 32);
4306 m = 1u << (f->size - 1);
4307 r = (r ^ m) - m;
4308 break;
4309 case 2: /* dl+dh split, signed 20 bit. */
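/* The contiguous extract leaves DL in bits 19:8 and DH in bits 7:0
   of R; swap the halves and sign-extend DH to reassemble the signed
   20-bit displacement DH:DL. */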
4310 r = ((int8_t)r << 12) | (r >> 8);
4311 break;
4312 default:
4313 abort();
4314 }
4315
4316 /* Validate that the "compressed" encoding we selected above is sound,
4317 i.e. that we haven't made two different original fields overlap. */
4318 assert(((o->presentC >> f->indexC) & 1) == 0);
4319 o->presentC |= 1 << f->indexC;
4320 o->presentO |= 1 << f->indexO;
4321
4322 o->c[f->indexC] = r;
4323 }
4324
4325 /* Look up the insn at the current PC, extracting the operands into O and
4326 returning the info struct for the insn. Returns NULL for an invalid insn. */
4327
4328 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4329 DisasFields *f)
4330 {
4331 uint64_t insn, pc = s->pc;
4332 int op, op2, ilen;
4333 const DisasInsn *info;
4334
4335 insn = ld_code2(env, pc);
4336 op = (insn >> 8) & 0xff;
4337 ilen = get_ilen(op);
4338 s->next_pc = s->pc + ilen;
4339
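/* Left-align the insn within the uint64_t, whatever its length, so
   that field positions match the big-bit-endian numbering that
   extract_field() expects. */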
4340 switch (ilen) {
4341 case 2:
4342 insn = insn << 48;
4343 break;
4344 case 4:
4345 insn = ld_code4(env, pc) << 32;
4346 break;
4347 case 6:
4348 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4349 break;
4350 default:
4351 abort();
4352 }
4353
4354 /* We can't actually determine the insn format until we've looked up
4355 the full insn opcode, which we can't do without first locating the
4356 secondary opcode. Assume by default that OP2 is at bit 40; for
4357 those smaller insns that don't actually have a secondary opcode,
4358 this will correctly result in OP2 = 0. */
4359 switch (op) {
4360 case 0x01: /* E */
4361 case 0x80: /* S */
4362 case 0x82: /* S */
4363 case 0x93: /* S */
4364 case 0xb2: /* S, RRF, RRE */
4365 case 0xb3: /* RRE, RRD, RRF */
4366 case 0xb9: /* RRE, RRF */
4367 case 0xe5: /* SSE, SIL */
4368 op2 = (insn << 8) >> 56;
4369 break;
4370 case 0xa5: /* RI */
4371 case 0xa7: /* RI */
4372 case 0xc0: /* RIL */
4373 case 0xc2: /* RIL */
4374 case 0xc4: /* RIL */
4375 case 0xc6: /* RIL */
4376 case 0xc8: /* SSF */
4377 case 0xcc: /* RIL */
4378 op2 = (insn << 12) >> 60;
4379 break;
4380 case 0xd0 ... 0xdf: /* SS */
4381 case 0xe1: /* SS */
4382 case 0xe2: /* SS */
4383 case 0xe8: /* SS */
4384 case 0xe9: /* SS */
4385 case 0xea: /* SS */
4386 case 0xee ... 0xf3: /* SS */
4387 case 0xf8 ... 0xfd: /* SS */
4388 op2 = 0;
4389 break;
4390 default:
4391 op2 = (insn << 40) >> 56;
4392 break;
4393 }
4394
4395 memset(f, 0, sizeof(*f));
4396 f->op = op;
4397 f->op2 = op2;
4398
4399 /* Look up the instruction. */
4400 info = lookup_opc(op << 8 | op2);
4401
4402 /* If we found it, extract the operands. */
4403 if (info != NULL) {
4404 DisasFormat fmt = info->fmt;
4405 int i;
4406
4407 for (i = 0; i < NUM_C_FIELD; ++i) {
4408 extract_field(f, &format_info[fmt].op[i], insn);
4409 }
4410 }
4411 return info;
4412 }
4413
4414 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4415 {
4416 const DisasInsn *insn;
4417 ExitStatus ret = NO_EXIT;
4418 DisasFields f;
4419 DisasOps o;
4420
4421 insn = extract_insn(env, s, &f);
4422
4423 /* If not found, try the old interpreter. This includes ILLOPC. */
4424 if (insn == NULL) {
4425 disas_s390_insn(env, s);
4426 switch (s->is_jmp) {
4427 case DISAS_NEXT:
4428 ret = NO_EXIT;
4429 break;
4430 case DISAS_TB_JUMP:
4431 ret = EXIT_GOTO_TB;
4432 break;
4433 case DISAS_JUMP:
4434 ret = EXIT_PC_UPDATED;
4435 break;
4436 case DISAS_EXCP:
4437 ret = EXIT_NORETURN;
4438 break;
4439 default:
4440 abort();
4441 }
4442
4443 s->pc = s->next_pc;
4444 return ret;
4445 }
4446
4447 /* Set up the structures we use to communicate with the helpers. */
4448 s->insn = insn;
4449 s->fields = &f;
4450 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4451 TCGV_UNUSED_I64(o.out);
4452 TCGV_UNUSED_I64(o.out2);
4453 TCGV_UNUSED_I64(o.in1);
4454 TCGV_UNUSED_I64(o.in2);
4455 TCGV_UNUSED_I64(o.addr1);
4456
4457 /* Implement the instruction. */
4458 if (insn->help_in1) {
4459 insn->help_in1(s, &f, &o);
4460 }
4461 if (insn->help_in2) {
4462 insn->help_in2(s, &f, &o);
4463 }
4464 if (insn->help_prep) {
4465 insn->help_prep(s, &f, &o);
4466 }
4467 if (insn->help_op) {
4468 ret = insn->help_op(s, &o);
4469 }
4470 if (insn->help_wout) {
4471 insn->help_wout(s, &f, &o);
4472 }
4473 if (insn->help_cout) {
4474 insn->help_cout(s, &o);
4475 }
4476
4477 /* Free any temporaries created by the helpers. */
4478 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4479 tcg_temp_free_i64(o.out);
4480 }
4481 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4482 tcg_temp_free_i64(o.out2);
4483 }
4484 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4485 tcg_temp_free_i64(o.in1);
4486 }
4487 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4488 tcg_temp_free_i64(o.in2);
4489 }
4490 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4491 tcg_temp_free_i64(o.addr1);
4492 }
4493
4494 /* Advance to the next instruction. */
4495 s->pc = s->next_pc;
4496 return ret;
4497 }
4498
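/* Translate a block of insns starting at tb->pc.  With search_pc set,
   also record the guest PC, cc_op and icount of each insn in the
   gen_opc_* side tables, so that restore_state_to_opc() below can map
   a retranslation point back onto guest state. */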
4499 static inline void gen_intermediate_code_internal(CPUS390XState *env,
4500 TranslationBlock *tb,
4501 int search_pc)
4502 {
4503 DisasContext dc;
4504 target_ulong pc_start;
4505 uint64_t next_page_start;
4506 uint16_t *gen_opc_end;
4507 int j, lj = -1;
4508 int num_insns, max_insns;
4509 CPUBreakpoint *bp;
4510 ExitStatus status;
4511 bool do_debug;
4512
4513 pc_start = tb->pc;
4514
4515 /* 31-bit mode */
4516 if (!(tb->flags & FLAG_MASK_64)) {
4517 pc_start &= 0x7fffffff;
4518 }
4519
4520 dc.tb = tb;
4521 dc.pc = pc_start;
4522 dc.cc_op = CC_OP_DYNAMIC;
4523 do_debug = dc.singlestep_enabled = env->singlestep_enabled;
4524 dc.is_jmp = DISAS_NEXT;
4525
4526 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4527
4528 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4529
4530 num_insns = 0;
4531 max_insns = tb->cflags & CF_COUNT_MASK;
4532 if (max_insns == 0) {
4533 max_insns = CF_COUNT_MASK;
4534 }
4535
4536 gen_icount_start();
4537
4538 do {
4539 if (search_pc) {
4540 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4541 if (lj < j) {
4542 lj++;
4543 while (lj < j) {
4544 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4545 }
4546 }
4547 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4548 gen_opc_cc_op[lj] = dc.cc_op;
4549 tcg_ctx.gen_opc_instr_start[lj] = 1;
4550 tcg_ctx.gen_opc_icount[lj] = num_insns;
4551 }
4552 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4553 gen_io_start();
4554 }
4555
4556 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4557 tcg_gen_debug_insn_start(dc.pc);
4558 }
4559
4560 status = NO_EXIT;
4561 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
4562 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4563 if (bp->pc == dc.pc) {
4564 status = EXIT_PC_STALE;
4565 do_debug = true;
4566 break;
4567 }
4568 }
4569 }
4570 if (status == NO_EXIT) {
4571 status = translate_one(env, &dc);
4572 }
4573
4574 /* If we reach a page boundary, are single-stepping,
4575 or have exhausted the instruction count, stop generation. */
4576 if (status == NO_EXIT
4577 && (dc.pc >= next_page_start
4578 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4579 || num_insns >= max_insns
4580 || singlestep
4581 || env->singlestep_enabled)) {
4582 status = EXIT_PC_STALE;
4583 }
4584 } while (status == NO_EXIT);
4585
4586 if (tb->cflags & CF_LAST_IO) {
4587 gen_io_end();
4588 }
4589
4590 switch (status) {
4591 case EXIT_GOTO_TB:
4592 case EXIT_NORETURN:
4593 break;
4594 case EXIT_PC_STALE:
4595 update_psw_addr(&dc);
4596 /* FALLTHRU */
4597 case EXIT_PC_UPDATED:
4598 if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
4599 gen_op_calc_cc(&dc);
4600 } else {
4601 /* The next TB starts off with CC_OP_DYNAMIC,
4602 so make sure the cc op type is stored in env. */
4603 gen_op_set_cc_op(&dc);
4604 }
4605 if (do_debug) {
4606 gen_exception(EXCP_DEBUG);
4607 } else {
4608 /* Generate the return instruction */
4609 tcg_gen_exit_tb(0);
4610 }
4611 break;
4612 default:
4613 abort();
4614 }
4615
4616 gen_icount_end(tb, num_insns);
4617 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4618 if (search_pc) {
4619 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4620 lj++;
4621 while (lj <= j) {
4622 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4623 }
4624 } else {
4625 tb->size = dc.pc - pc_start;
4626 tb->icount = num_insns;
4627 }
4628
4629 #if defined(S390X_DEBUG_DISAS)
4630 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4631 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4632 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4633 qemu_log("\n");
4634 }
4635 #endif
4636 }
4637
4638 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4639 {
4640 gen_intermediate_code_internal(env, tb, 0);
4641 }
4642
4643 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4644 {
4645 gen_intermediate_code_internal(env, tb, 1);
4646 }
4647
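/* Called after an exception to resynchronize the CPU state from the
   side tables produced with search_pc set; pc_pos indexes the op at
   which execution stopped. */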
4648 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4649 {
4650 int cc_op;
4651 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4652 cc_op = gen_opc_cc_op[pc_pos];
4653 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4654 env->cc_op = cc_op;
4655 }
4656 }