]> git.proxmox.com Git - qemu.git/blob - target-s390x/translate.c
target-s390: Convert SPKA
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/* Per-TB translation state, threaded through every translator function. */
struct DisasContext {
    struct TranslationBlock *tb;    /* the TB being translated */
    const DisasInsn *insn;          /* descriptor of the current insn */
    DisasFields *fields;            /* decoded operand fields */
    uint64_t pc, next_pc;           /* start and end address of current insn */
    enum cc_op cc_op;               /* how the condition code is deferred */
    bool singlestep_enabled;
    int is_jmp;                     /* DISAS_* exit status of the TB */
};
60
/* Information carried about a condition to be evaluated.  The operand
   pair lives in the 64-bit or 32-bit arm of the union as selected by
   is_64.  g1/g2 mark operands that alias global TCG values and so must
   not be freed (see free_compare). */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
72
/* is_jmp value: the TB ended by raising an exception. */
#define DISAS_EXCP 4

static void gen_op_calc_cc(DisasContext *s);

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches that could / could not be inlined. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
81
/* Trace the raw instruction bits when verbose disas logging is enabled. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
86
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
99 {
100 int i;
101
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
108 }
109
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
116 }
117 }
118
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
125 }
126 }
127
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
135 }
136 }
137 #endif
138
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
143 }
144 #endif
145
146 cpu_fprintf(f, "\n");
147 }
148
/* TCG globals mirroring the architectural state in CPUS390XState. */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

static TCGv_i32 cc_op;          /* deferred condition-code operation */
static TCGv_i64 cc_src;         /* inputs of the deferred cc computation */
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Backing store for the r0-r15/f0-f15 global names; must outlive the
   TCG globals created in s390x_translate_init. */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* Per-generated-opcode snapshot of cc_op — presumably consumed by the
   state-restore machinery outside this chunk; TODO confirm. */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
/* Create the TCG globals that mirror CPUS390XState fields and register
   the helper functions.  Called once at translator initialization. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* The name strings passed to TCG must stay valid, hence the static
       cpu_reg_names backing array. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
202
/* Return a fresh i64 temp holding general register REG. */
static inline TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh i64 temp holding float register REG. */
static inline TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, fregs[reg]);
    return r;
}

/* Return a fresh i32 temp holding the high (short-float) half of
   float register REG. */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    /* On a 32-bit host a 64-bit global is a register pair; grab the
       high half directly. */
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    /* Reinterpret the i32 temp as an i64 so the 64-bit shift can write
       straight into it. */
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}

/* As load_freg32, but the result is delivered in a fresh i64 temp. */
static inline TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

/* Return a fresh i32 temp holding the low 32 bits of general reg REG. */
static inline TCGv_i32 load_reg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(r, regs[reg]);
    return r;
}

/* Return a fresh i64 temp holding the low 32 bits of general reg REG,
   sign-extended to 64 bits. */
static inline TCGv_i64 load_reg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(r, regs[reg]);
    return r;
}
248
/* Write V to general register REG (all 64 bits). */
static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Write V to float register REG (all 64 bits). */
static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

/* Write the 32-bit V to the low half of general register REG. */
static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    /* Reinterpret the i32 value as i64 and deposit it into bits 0-31. */
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}

/* Write the low 32 bits of V to the low half of general reg REG. */
static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Write the low 32 bits of V to the HIGH half of general reg REG. */
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Write the 32-bit V to the short-float (high) half of freg REG. */
static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}

/* As store_freg32 with the value supplied in the low half of an i64. */
static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
296
/* Fetch the low half of a helper's 128-bit result from env->retxl. */
static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

/* Write the current translation pc into psw.addr. */
static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Before an operation that may fault (softmmu only): synchronize
   psw.addr and the condition code so the exception path sees
   up-to-date state. */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}
315
/* Fetch 2 instruction bytes at PC, zero-extended. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch 4 instruction bytes at PC, zero-extended. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch a 6-byte instruction at PC into the low 48 bits. */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
330
331 static inline int get_mem_index(DisasContext *s)
332 {
333 switch (s->tb->flags & FLAG_MASK_ASC) {
334 case PSW_ASC_PRIMARY >> 32:
335 return 0;
336 case PSW_ASC_SECONDARY >> 32:
337 return 1;
338 case PSW_ASC_HOME >> 32:
339 return 2;
340 default:
341 tcg_abort();
342 break;
343 }
344 }
345
346 static void gen_exception(int excp)
347 {
348 TCGv_i32 tmp = tcg_const_i32(excp);
349 gen_helper_exception(cpu_env, tmp);
350 tcg_temp_free_i32(tmp);
351 }
352
/* Raise program exception CODE for the current instruction: record the
   code and instruction length for the handler, advance the PSW past
   the instruction, materialize the cc, trap, and end the TB. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    gen_op_calc_cc(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);

    /* End TB here. */
    s->is_jmp = DISAS_EXCP;
}
379
/* Report an illegal opcode as a specification exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

/* Emit a privileged-operation exception when the TB was translated in
   problem state.  NOTE(review): translation of the instruction still
   continues after this; assumes the exception helper does not return
   at runtime — confirm against gen_helper_exception. */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
391
/* Compute the effective address x2 + b2 + d2 into a fresh i64 temp
   (caller frees).  Register number 0 means "no register", and outside
   64-bit mode the result is truncated to 31 bits. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
429
/* Defer a constant condition code VAL (0-3) via the CC_OP_CONST* ops. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}

/* Defer a one-operand cc computation: only cc_dst is live. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* As gen_op_update1_cc_i64 with a 32-bit operand (zero-extended). */
static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Defer a two-operand cc computation: cc_src and cc_dst are live. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* As gen_op_update2_cc_i64 with 32-bit operands (zero-extended). */
static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Defer a three-operand cc computation: all cc globals are live. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* cc from a 32-bit logical (zero / not-zero) result. */
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}

/* cc from a 64-bit logical (zero / not-zero) result. */
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc from a 32-bit float result. */
static inline void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc from a 64-bit float result. */
static inline void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc from a 128-bit float result (high and low halves). */
static inline void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
502
/* Defer a 32-bit comparison under condition op COND. */
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}

/* Defer a 64-bit comparison under condition op COND. */
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}

/* 32-bit signed compare. */
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}

/* 32-bit unsigned compare. */
static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}

/* 32-bit signed compare against the constant V2. */
static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
{
    /* XXX optimize for the constant? put it in s? */
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
    tcg_temp_free_i32(tmp);
}

/* 32-bit unsigned compare against the constant V2. */
static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
{
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
    tcg_temp_free_i32(tmp);
}

/* 64-bit signed compare. */
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}

/* 64-bit unsigned compare. */
static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}

/* 64-bit signed compare against the constant V2. */
static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_s64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* 64-bit unsigned compare against the constant V2. */
static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_u64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* cc from a 32-bit signed compare of VAL against zero. */
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}

/* cc from a 64-bit signed compare of VAL against zero. */
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}
573
/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    /* The cc was fully computed into the cc_op global; the deferred
       inputs are dead. */
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}

/* Flush the translation-time cc_op into the cc_op global, unless it is
   already there (STATIC) or only known at runtime (DYNAMIC). */
static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
594
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
669
/* Decode an RR format insn: r1 in bits 4-7, r2 in bits 0-3. */
static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}

/* Decode an RX format insn and return the x2+b2+d2 effective address
   (caller frees the temp). */
static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;

    return get_address(s, *x2, *b2, *d2);
}

/* Decode an RS format insn into its register and address fields. */
static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}

/* Decode an SI format insn and return the b1+d1 effective address
   (caller frees the temp). */
static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}
714
/* Whether a direct TB link (goto_tb) may target DEST: the destination
   must lie on one of the pages this TB already spans, and neither
   single-stepping nor a TB ending in I/O allows chaining. */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}
723
/* Emit a jump to PC, chaining directly to the next TB when allowed. */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    /* The cc state must be flushed before leaving the TB. */
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        /* The low bits of the TB pointer encode which exit was taken. */
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}
738
/* Statistics hooks counting branches that could not / could be
   inlined for a given cc_op.  No-ops unless DEBUG_INLINE_BRANCHES. */
static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
752
/* Table of mask values to comparison codes, given a comparison as input.
   Indexed by the 4-bit branch mask (bit 8 = EQ, 4 = LT, 2 = GT, 1 = CC3).
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
779
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial masks: branch always (15) or never (0).  No operands are
       needed; alias both at the cc_op global and mark them global so
       free_compare leaves them alone. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same mask table as the signed compares, with the resulting
           condition made unsigned. */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* Only the "all selected bits zero" / "not all zero" masks can
           be inlined as a compare of (src & dst) against 0. */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The cc value already lives in the cc_op global; compare it
           against the constant(s) selected by MASK. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
1002
1003 static void free_compare(DisasCompare *c)
1004 {
1005 if (!c->g1) {
1006 if (c->is_64) {
1007 tcg_temp_free_i64(c->u.s64.a);
1008 } else {
1009 tcg_temp_free_i32(c->u.s32.a);
1010 }
1011 }
1012 if (!c->g2) {
1013 if (c->is_64) {
1014 tcg_temp_free_i64(c->u.s64.b);
1015 } else {
1016 tcg_temp_free_i32(c->u.s32.b);
1017 }
1018 }
1019 }
1020
/* Legacy decoder for the remaining 0xb2xx (mostly privileged system)
   instructions not yet converted to the structured decoder.  In
   CONFIG_USER_ONLY builds every op here is treated as illegal. */
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
                     uint32_t insn)
{
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2;
    int r1, r2;
    int r3, d2, b2;

    r1 = (insn >> 4) & 0xf;
    r2 = insn & 0xf;

    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);

    switch (op) {
    case 0x0d: /* PTLB                [S] */
        /* Purge TLB */
        check_privileged(s);
        gen_helper_ptlb(cpu_env);
        break;
    case 0x10: /* SPX     D2(B2)     [S] */
        /* Set Prefix Register */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spx(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x11: /* STPX     D2(B2)   [S] */
        /* Store Prefix */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x12: /* STAP     D2(B2)   [S] */
        /* Store CPU Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x21: /* IPTE     R1,R2    [RRE] */
        /* Invalidate PTE */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r1);
        tmp2 = load_reg(r2);
        gen_helper_ipte(cpu_env, tmp, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x29: /* ISKE     R1,R2    [RRE] */
        /* Insert Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r2);
        tmp2 = tcg_temp_new_i64();
        gen_helper_iske(tmp2, cpu_env, tmp);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x2a: /* RRBE     R1,R2    [RRE] */
        /* Reset Reference Bit Extended (the previous "Set Storage Key
           Extended" comment here was a copy-paste from 0x2b; the
           helper called is rrbe). */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x2b: /* SSKE     R1,R2    [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_sske(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x34: /* STCH ? */
        /* Store Subchannel */
        check_privileged(s);
        /* No channel subsystem emulated: report cc 3 (not operational). */
        gen_op_movi_cc(s, 3);
        break;
    case 0x46: /* STURA    R1,R2    [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x50: /* CSP      R1,R2    [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call */
        check_privileged(s);
        /* No channel subsystem emulated: report cc 3. */
        gen_op_movi_cc(s, 3);
        break;
    case 0x78: /* STCKE    D2(B2)   [S] */
        /* Store Clock Extended */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x79: /* SACF     D2(B2)   [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->pc = s->next_pc;
        update_psw_addr(s);
        s->is_jmp = DISAS_JUMP;
        break;
    case 0x7d: /* STSI     D2,(B2)  [S] */
        /* Store System Information */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0xb1: /* STFL     D2(B2)   [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        /* NOTE(review): facility bits are hard-coded here, not derived
           from the CPU model. */
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0xb2: /* LPSWE    D2(B2)   [S] */
        /* Load PSW Extended */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        break;
    case 0x20: /* SERVC     R1,R2     [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        tmp = load_reg(r1);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    default:
#endif
        /* In user-only builds this fall-through body is the whole
           function: everything in the 0xb2 space is illegal. */
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
#ifndef CONFIG_USER_ONLY
        break;
    }
#endif
}
1236
1237 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
1238 {
1239 unsigned char opc;
1240 uint64_t insn;
1241 int op;
1242
1243 opc = cpu_ldub_code(env, s->pc);
1244 LOG_DISAS("opc 0x%x\n", opc);
1245
1246 switch (opc) {
1247 case 0xb2:
1248 insn = ld_code4(env, s->pc);
1249 op = (insn >> 16) & 0xff;
1250 disas_b2(env, s, op, insn);
1251 break;
1252 default:
1253 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
1254 gen_illegal_opcode(s);
1255 break;
1256 }
1257 }
1258
1259 /* ====================================================================== */
1260 /* Define the insn format enumeration. */
/* First expansion of insn-format.def: each F<n>(NAME, ...) line becomes
   one FMT_<NAME> enumerator; the field-layout arguments are discarded
   in this pass (they are consumed by the second expansion below).  */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

/* Undefine so the F<n> helpers can be redefined for the second pass.  */
#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
1278
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* "Original" field index: one distinct value per architectural field
   name.  Used as a bit number in DisasFields.presentO (see have_field1).  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* "Compact" field index: fields that can never coexist in one format
   share the same slot of DisasFields.c[].  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

/* The decoded fields of one instruction.  */
struct DisasFields {
    unsigned op:8;          /* opcode bytes (names suggest primary/secondary
                               opcode — confirm against the decoder) */
    unsigned op2:8;
    unsigned presentC:16;   /* bitmap over enum DisasFieldIndexC */
    unsigned int presentO;  /* bitmap over enum DisasFieldIndexO */
    int c[NUM_C_FIELD];     /* field values, indexed by DisasFieldIndexC */
};
1346
/* This is the way fields are to be accessed out of DisasFields.
   The macros paste the field name into both index spaces so callers
   can write e.g. get_field(s->fields, r1).  */
#define have_field(S, F) have_field1((S), FLD_O_##F)
#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)

/* Return true if original-index field C was decoded for this insn.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch the value of a field; asserts it was actually present.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1362
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field within the insn */
    unsigned int size:8;        /* field width in bits */
    unsigned int type:2;        /* extraction rule: the I() macro passes 1,
                                   the 20-bit displacement macros pass 2,
                                   all others 0 (see the table below) */
    unsigned int indexC:6;      /* slot in DisasFields.c[] */
    enum DisasFieldIndexO indexO:8; /* bit in DisasFields.presentO */
} DisasField;

/* Per-format list of fields; unused trailing entries are zero.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

/* Shorthand initializers for the common field shapes:
   R/M: 4-bit register/mask; BD: base + 12-bit displacement;
   BXD: base + index + 12-bit displacement; BDL/BXDL: 20-bit (long)
   displacement variants; I: signed immediate; L: length field.  */
#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

/* Second expansion of insn-format.def: build the field-layout table,
   indexed by the DisasFormat enum produced by the first expansion.  */
#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1416
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    /* The g_* flags mark values that alias TCG globals rather than
       temporaries (so they must not be freed or clobbered in place;
       see e.g. the assert in op_andi).  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;   /* operand values / result slots */
    TCGv_i64 addr1;                  /* computed first-operand address */
} DisasOps;

/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
1443
/* Architectural facility an instruction belongs to; used to gate
   availability of each insn on the modeled CPU.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;

/* One entry of the instruction decode table.  The help_* hooks form a
   pipeline: load inputs (in1/in2), prepare outputs (prep), perform the
   operation (op), compute the CC (cout), write outputs back (wout).  */
struct DisasInsn {
    unsigned opc:16;        /* opcode (including any extension byte) */
    DisasFormat fmt:6;      /* instruction format, indexes format_info[] */
    DisasFacility fac:6;    /* required facility */

    const char *name;       /* mnemonic, for logging/disassembly */

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* per-insn constant passed to the op handler */
};
1483
1484 /* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */
1486
1487 static void help_l2_shift(DisasContext *s, DisasFields *f,
1488 DisasOps *o, int mask)
1489 {
1490 int b2 = get_field(f, b2);
1491 int d2 = get_field(f, d2);
1492
1493 if (b2 == 0) {
1494 o->in2 = tcg_const_i64(d2 & mask);
1495 } else {
1496 o->in2 = get_address(s, 0, b2, d2);
1497 tcg_gen_andi_i64(o->in2, o->in2, mask);
1498 }
1499 }
1500
1501 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1502 {
1503 if (dest == s->next_pc) {
1504 return NO_EXIT;
1505 }
1506 if (use_goto_tb(s, dest)) {
1507 gen_update_cc_op(s);
1508 tcg_gen_goto_tb(0);
1509 tcg_gen_movi_i64(psw_addr, dest);
1510 tcg_gen_exit_tb((tcg_target_long)s->tb);
1511 return EXIT_GOTO_TB;
1512 } else {
1513 tcg_gen_movi_i64(psw_addr, dest);
1514 return EXIT_PC_UPDATED;
1515 }
1516 }
1517
/* Emit a conditional branch.  C describes the condition under which the
   branch is taken; it is consumed (freed) before returning.  The target
   is either relative immediate IMM (is_imm true) or the value in CDEST.
   Three code-generation strategies are used depending on whether each
   exit can be chained with goto_tb.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;    /* halfword-scaled relative target */
    int lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* 32-bit compare: widen the setcond result and select on it.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

egress:
    free_compare(c);
    return ret;
}
1642
1643 /* ====================================================================== */
1644 /* The operations. These perform the bulk of the work for any insn,
1645 usually after the operands have been loaded and output initialized. */
1646
/* Integer absolute value via helper: out = |in2|.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Float32 absolute value: clear the sign bit (bits above 31 are
   cleared as well by the mask).  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Float64 absolute value: clear the sign bit.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* Float128 absolute value: clear the sign bit in the high doubleword,
   pass the low doubleword through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* 64-bit addition: out = in1 + in2.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1677
1678 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1679 {
1680 TCGv_i64 cc;
1681
1682 tcg_gen_add_i64(o->out, o->in1, o->in2);
1683
1684 /* XXX possible optimization point */
1685 gen_op_calc_cc(s);
1686 cc = tcg_temp_new_i64();
1687 tcg_gen_extu_i32_i64(cc, cc_op);
1688 tcg_gen_shri_i64(cc, cc, 1);
1689
1690 tcg_gen_add_i64(o->out, o->out, cc);
1691 tcg_temp_free_i64(cc);
1692 return NO_EXIT;
1693 }
1694
/* Float32 addition via helper (helper has access to env FP state).  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float64 addition via helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float128 addition: the helper returns the high half directly and the
   low half through the low-128 side channel.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Bitwise AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1719
/* AND an immediate into a sub-field of the first operand.  insn->data
   packs the field position: low byte = bit offset, high byte = width.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Position the immediate, then set every bit outside the field so
       the AND leaves the rest of in1 untouched.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1736
/* Branch-and-save, register form.  The link information is always
   written; the branch is suppressed when no target was supplied
   (in2 left unset, cf. the bcr %r0 case in help_branch).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* Branch-relative-and-save: save link info, branch to PC + 2 * I2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* Branch on condition: turn mask M1 into a DisasCompare and emit the
   branch (immediate-relative or register target).  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1764
/* Branch on count, 32-bit: decrement the low half of R1 and branch if
   the 32-bit result is nonzero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write back only the low 32 bits of the decremented value.  */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1788
/* Branch on count, 64-bit: decrement R1 in place and branch if the
   result is nonzero.  The compare uses the global register directly
   (g1 set so help_branch does not free it).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1807
/* Float32 compare: helper sets the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Float64 compare: helper sets the CC directly.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Float128 compare (operands as high/low doubleword pairs).  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1828
/* Float32 -> int32 conversion with modifier M3; CC derived from the
   source value.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Float64 -> int32 conversion with modifier M3.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Float128 -> int32 conversion with modifier M3.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Float32 -> int64 conversion with modifier M3.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Float64 -> int64 conversion with modifier M3.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Float128 -> int64 conversion with modifier M3.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* int64 -> float32 conversion with modifier M3; no CC change.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* int64 -> float64 conversion with modifier M3.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* int64 -> float128 conversion; low half arrives via return_low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1907
/* CHECKSUM.  The helper returns the number of bytes consumed; the
   checksum accumulator comes back through the low-128 side channel
   into o->out, and the CC is set by the helper.  R2/R2+1 (address /
   remaining length) are advanced past the consumed bytes.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1924
/* Compare logical (storage-to-storage).  The encoded length L1 is one
   less than the byte count; power-of-two sizes are inlined as a pair
   of loads plus an unsigned compare, everything else goes through the
   helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: defer to the helper, which sets the CC itself.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1958
/* Compare logical long extended: register pairs R1/R3 described to the
   helper by index; CC set by the helper.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare logical under mask: bytes of (32-bit) in1 selected by M3 are
   compared against successive bytes at in2; CC set by the helper.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Compare logical string (terminator byte in r0); updated addresses
   come back in in1 and, via the low-128 channel, in in2.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1992
/* Compare and swap, 32-bit: helper compares in1 against memory at in2
   and conditionally stores regs[r3]; CC set by the helper.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare and swap, 64-bit.  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare double and swap (32-bit pair): assemble the even/odd register
   pair R3/R3+1 into one 64-bit value and reuse the 64-bit CS helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare double and swap, 64-bit pairs (128-bit): fully in the helper.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
2033
/* Convert to decimal: the 32-bit value in in1 is converted by the
   helper and the 8-byte result stored at address in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2045
2046 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypervisor/machine interface.  The function code
   is taken from the D2 field; the helper consumes r2/r1 and writes its
   result back into r2.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
2060 #endif
2061
/* Signed 32-bit divide: the helper returns one half of the result pair
   directly and the other via the low-128 side channel into o->out.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Unsigned 32-bit divide; same result-pair convention as op_divs32.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Signed 64-bit divide; same result-pair convention.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Unsigned 128/64 divide: the 128-bit dividend is passed as the
   out/out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Float32 divide via helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float64 divide via helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float128 divide; low half of the result via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2108
/* Extract access register: out = aregs[r2], zero-extended.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* Extract FP control register: out = fpc, zero-extended.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2121
/* EXECUTE: run the target instruction out of line in a helper, after
   synchronizing the PSW address and CC to env.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further. */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2147
/* FIND LEFTMOST ONE: R1 = bit position of the highest set bit (as a
   count of leading zeros), R1+1 = input with that bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation. */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing. */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2167
2168 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2169 {
2170 int m3 = get_field(s->fields, m3);
2171 int pos, len, base = s->insn->data;
2172 TCGv_i64 tmp = tcg_temp_new_i64();
2173 uint64_t ccm;
2174
2175 switch (m3) {
2176 case 0xf:
2177 /* Effectively a 32-bit load. */
2178 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2179 len = 32;
2180 goto one_insert;
2181
2182 case 0xc:
2183 case 0x6:
2184 case 0x3:
2185 /* Effectively a 16-bit load. */
2186 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2187 len = 16;
2188 goto one_insert;
2189
2190 case 0x8:
2191 case 0x4:
2192 case 0x2:
2193 case 0x1:
2194 /* Effectively an 8-bit load. */
2195 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2196 len = 8;
2197 goto one_insert;
2198
2199 one_insert:
2200 pos = base + ctz32(m3) * 8;
2201 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2202 ccm = ((1ull << len) - 1) << pos;
2203 break;
2204
2205 default:
2206 /* This is going to be a sequence of loads and inserts. */
2207 pos = base + 32 - 8;
2208 ccm = 0;
2209 while (m3) {
2210 if (m3 & 0x8) {
2211 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2212 tcg_gen_addi_i64(o->in2, o->in2, 1);
2213 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2214 ccm |= 0xff << pos;
2215 }
2216 m3 = (m3 << 1) & 0xf;
2217 pos -= 8;
2218 }
2219 break;
2220 }
2221
2222 tcg_gen_movi_i64(tmp, ccm);
2223 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2224 tcg_temp_free_i64(tmp);
2225 return NO_EXIT;
2226 }
2227
/* Insert immediate: deposit in2 into in1 as a SIZE-bit field at bit
   offset SHIFT; both packed into insn->data (low byte = shift,
   high byte = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}

/* Insert program mask: rebuild bits 24-31 of the low word of out from
   the PSW program-mask field and the condition code.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    /* Clear bits 24-31; they are rebuilt below.  */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    t1 = tcg_temp_new_i64();
    /* (psw_mask << 20) >> 36: bring the PSW program-mask bits down
       into place next to the CC.  */
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Merge the condition code in at bits 28-31.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2254
/* ldeb: float32 -> float64 conversion via helper.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* ledb: float64 -> float32 conversion via helper.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* ldxb: float128 (in1/in2 pair) -> float64 via helper.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* lexb: float128 (in1/in2 pair) -> float32 via helper.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* lxdb: float64 -> float128; low half via return_low128.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* lxeb: float32 -> float128; low half via return_low128.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Load logical 31 bits: out = in2 & 0x7fffffff.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2298
2299 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2300 {
2301 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2302 return NO_EXIT;
2303 }
2304
2305 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2306 {
2307 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2308 return NO_EXIT;
2309 }
2310
/* Load a sign-extended halfword from memory at in2.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2316
/* Load a zero-extended halfword from memory at in2.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2322
/* Load a sign-extended word from memory at in2.  */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2328
/* Load a zero-extended word from memory at in2.  */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2334
/* Load a doubleword from memory at in2.  */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2340
2341 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 from memory.
   Privileged; the helper may fault, hence potential_page_fault.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2353
/* LOAD CONTROL (64-bit): load control registers r1..r3 from memory.
   Privileged; the helper may fault, hence potential_page_fault.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS: translate the address in in2; the helper sets the
   condition code, so mark the CC as static afterwards.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2373
/* LOAD PSW: load a 64-bit (ESA-format) PSW from memory and install it.
   Ends the TB since the PSW (and thus control flow) changes.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    /* NOTE(review): only a shift-left by 32 is done here; any remapping of
       the short-format addressing-mode bits is presumably handled inside
       gen_helper_load_psw -- confirm against helper.c.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2392 #endif
2393
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2404
/* LOAD MULTIPLE (32-bit): load words into the low halves of registers
   r1..r3 inclusive, wrapping from r15 to r0, unrolled at translate time.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        /* Advance the address by one word and move to the next register.  */
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2426
/* LOAD MULTIPLE HIGH: load words into the high halves of registers
   r1..r3 inclusive, wrapping from r15 to r0.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2448
/* LOAD MULTIPLE (64-bit): load doublewords directly into registers
   r1..r3 inclusive, wrapping from r15 to r0.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2467
/* Generic register move: steal in2 as the output, transferring its
   "global" flag so the cleanup code does not free a TCG global.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2476
/* 128-bit move: steal the in1:in2 pair as the out:out2 pair, transferring
   the "global" flags so the cleanup code does not free TCG globals.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2488
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
2497
/* MOVE LONG: helper operates on the r1/r2 register pairs and returns
   the condition code in cc_op.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
2509
/* MOVE LONG EXTENDED: helper returns the condition code in cc_op.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2521
2522 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-space move; the l1 field names the
   register holding the length/key.  Helper returns CC in cc_op.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2532
/* MOVE TO SECONDARY: privileged cross-space move; the l1 field names the
   register holding the length/key.  Helper returns CC in cc_op.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2542 #endif
2543
/* MOVE PAGE: copy one page; r0 supplies the access controls.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    /* NOTE(review): set_cc_static assumes the helper stores the condition
       code into env (cc_op) itself -- the call above does not write cc_op
       directly, unlike op_mvcl et al.  Confirm against the mvpg helper.  */
    set_cc_static(s);
    return NO_EXIT;
}
2551
/* MOVE STRING: r0 holds the terminator byte; the helper returns the
   updated first operand in in1 and the second in the low-128 slot.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2560
/* Integer multiply, low 64 bits of the product.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2566
/* 64x64 -> 128-bit multiply; full product returned in out:out2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2573
/* MULTIPLY (short BFP).  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2579
/* MULTIPLY (short BFP operands, long BFP result).  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2585
/* MULTIPLY (long BFP).  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2591
/* MULTIPLY (extended BFP): out:out2 * in1:in2 -> out:out2.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2598
/* MULTIPLY (long BFP operands, extended BFP result in out:out2).  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2605
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3].  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
2613
/* MULTIPLY AND ADD (long BFP): out = in1 * in2 + f[r3].  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2620
/* MULTIPLY AND SUBTRACT (short BFP): out = in1 * in2 - f[r3].  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
2628
/* MULTIPLY AND SUBTRACT (long BFP): out = in1 * in2 - f[r3].  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2635
/* LOAD NEGATIVE (integer): out = -|in2|.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}
2641
/* LOAD NEGATIVE (short BFP): force the sign bit of the 32-bit value on.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
2647
/* LOAD NEGATIVE (long BFP): force the sign bit on.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
2653
/* LOAD NEGATIVE (extended BFP): force the sign bit of the high half on,
   pass the low half through.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2660
/* AND (character): byte-wise AND of two storage operands, CC from helper.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2670
/* LOAD COMPLEMENT (integer): out = -in2.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}
2676
/* LOAD COMPLEMENT (short BFP): flip the sign bit of the 32-bit value.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
2682
/* LOAD COMPLEMENT (long BFP): flip the sign bit.  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
2688
/* LOAD COMPLEMENT (extended BFP): flip the sign bit of the high half,
   pass the low half through.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2695
/* OR (character): byte-wise OR of two storage operands, CC from helper.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2705
/* Bitwise OR.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2711
/* OR IMMEDIATE: insn->data encodes the field size (high byte) and bit
   position (low byte) of the immediate within the register.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is the immediate; shift it into position in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2727
/* Byte-swap the low 16 bits.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}
2733
/* Byte-swap the low 32 bits.  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}
2739
/* Byte-swap all 64 bits.  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2745
2746 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2747 {
2748 TCGv_i32 t1 = tcg_temp_new_i32();
2749 TCGv_i32 t2 = tcg_temp_new_i32();
2750 TCGv_i32 to = tcg_temp_new_i32();
2751 tcg_gen_trunc_i64_i32(t1, o->in1);
2752 tcg_gen_trunc_i64_i32(t2, o->in2);
2753 tcg_gen_rotl_i32(to, t1, t2);
2754 tcg_gen_extu_i32_i64(o->out, to);
2755 tcg_temp_free_i32(t1);
2756 tcg_temp_free_i32(t2);
2757 tcg_temp_free_i32(to);
2758 return NO_EXIT;
2759 }
2760
/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2766
/* SET ACCESS: store the low 32 bits of in2 into access register r1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
2773
/* SUBTRACT (short BFP).  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2779
/* SUBTRACT (long BFP).  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2785
/* SUBTRACT (extended BFP): out:out2 - in1:in2 -> out:out2.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2792
/* SQUARE ROOT (short BFP).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2798
/* SQUARE ROOT (long BFP).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2804
/* SQUARE ROOT (extended BFP): 128-bit result in out:out2.  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2811
2812 #ifndef CONFIG_USER_ONLY
2813 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2814 {
2815 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2816 check_privileged(s);
2817 potential_page_fault(s);
2818 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2819 tcg_temp_free_i32(r1);
2820 return NO_EXIT;
2821 }
2822 #endif
2823
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 for the 32-bit form, 63 for the 64-bit form).  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit. Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
2837
/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2843
/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2849
/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2855
/* SET FPC: load the floating-point control register from in2.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
2861
2862 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: bits 56-59 of the address become the PSW key.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* NOTE(review): the "- 4" assumes PSW_SHIFT_KEY points one nibble above
       the key field in this cpu.h -- confirm against the PSW_MASK_KEY
       definition before touching this.  */
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}
2870
/* SET SYSTEM MASK: replace PSW bits 0-7 (bits 56-63 of psw_mask) with in2.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
2877
/* STORE CLOCK: read the TOD clock; CC 0 means the clock is set.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
2885
/* SET CLOCK COMPARATOR (privileged).  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}
2892
/* STORE CLOCK COMPARATOR (privileged).  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
2899
/* STORE CONTROL (64-bit): store control registers r1..r3 (privileged).  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2911
/* STORE CONTROL (32-bit): store control registers r1..r3 (privileged).  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2923
/* STORE CPU ID (privileged).  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* NOTE(review): only the 32-bit cpu_num is stored, zero-extended; the
       architected CPU ID also carries version/machine-type fields --
       presumably a simplification here, confirm intended.  */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
2930
/* SET CPU TIMER (privileged).  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
2937
/* STORE CPU TIMER (privileged).  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
2944
/* STORE THEN AND/OR SYSTEM MASK (opcode 0xac = AND, 0xad = OR):
   store the current system mask byte, then modify it with i2.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into the system-mask byte.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the system-mask byte.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
2968 #endif
2969
/* Store the low byte of in1 at address in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2975
/* Store the low halfword of in1 at address in2.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2981
/* Store the low word of in1 at address in2.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2987
/* Store the doubleword in1 at address in2.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2993
/* STORE ACCESS MULTIPLE: store access registers r1..r3 to memory.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3004
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by the m3
   mask to successive bytes at in2.  insn->data is the bit position of the
   word within the register (0 for STCM, 32 for STCMH).  Contiguous masks
   are special-cased as single wider stores.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the lowest selected byte within the register.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        /* Walk the mask from its most significant bit downward, storing
           one byte and bumping the address for each bit set.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3053
/* STORE MULTIPLE: store registers r1..r3 inclusive (wrapping r15->r0);
   insn->data is the element size in bytes (4 or 8).  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3077
/* STORE MULTIPLE HIGH: store the high words of registers r1..r3
   inclusive (wrapping r15->r0) to successive words at in2.  */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* NOTE: shift left by 32; qemu_st32 stores the LOW 32 bits of the
           temp, so this relies on tcg_gen_qemu_st32 truncating -- the high
           word is first moved down implicitly by the 32-bit store of the
           shifted value.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
3101
/* SEARCH STRING: r0 holds the character searched for; the helper returns
   the updated first operand in in1 and the second in the low-128 slot.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3110
/* Integer subtract.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3116
/* SUBTRACT WITH BORROW: out = in1 + ~in2 + borrow, where the borrow is
   bit 1 of the current condition code (the carry bit for SLB/SLBG).  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Extract the carry (borrow) flag from bit 1 of the CC.  */
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3134
/* SUPERVISOR CALL: record the SVC code and instruction length, then
   raise the SVC exception.  Ends the TB.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The exception handler needs the current PSW address and CC.  */
    update_psw_addr(s);
    gen_op_calc_cc(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3153
/* TEST DATA CLASS (short BFP): CC from helper.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3160
/* TEST DATA CLASS (long BFP): CC from helper.  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3167
/* TEST DATA CLASS (extended BFP, out:out2 pair): CC from helper.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3174
3175 #ifndef CONFIG_USER_ONLY
3176 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3177 {
3178 potential_page_fault(s);
3179 gen_helper_tprot(cc_op, o->addr1, o->in2);
3180 set_cc_static(s);
3181 return NO_EXIT;
3182 }
3183 #endif
3184
/* TRANSLATE: replace l1+1 bytes at addr1 using the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3194
/* UNPACK: convert packed-decimal at in2 to zoned at addr1.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3203
/* EXCLUSIVE OR (character): byte-wise XOR of two storage operands,
   CC from helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3213
/* Bitwise XOR.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3219
/* EXCLUSIVE OR IMMEDIATE: insn->data encodes the field size (high byte)
   and bit position (low byte) of the immediate within the register.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is the immediate; shift it into position in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3235
/* Produce a constant zero output.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}
3241
/* Produce a constant zero 128-bit output; out2 aliases out, so mark it
   "global" to prevent a double free.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3249
3250 /* ====================================================================== */
3251 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3252 the original inputs), update the various cc data structures in order to
3253 be able to compute the new condition code. */
3254
/* CC for 32-bit LOAD POSITIVE.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}
3259
/* CC for 64-bit LOAD POSITIVE.  */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}
3264
/* CC for 32-bit signed add.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}
3269
/* CC for 64-bit signed add.  */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}
3274
/* CC for 32-bit unsigned (logical) add.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}
3279
/* CC for 64-bit unsigned (logical) add.  */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}
3284
/* CC for 32-bit add with carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}
3289
/* CC for 64-bit add with carry.  */
static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}
3294
/* CC for 32-bit signed compare.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}
3299
/* CC for 64-bit signed compare.  */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}
3304
/* CC for 32-bit unsigned compare.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}
3309
/* CC for 64-bit unsigned compare.  */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}
3314
/* CC for a short BFP result.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}
3319
/* CC for a long BFP result.  */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}
3324
/* CC for an extended BFP result (out:out2 pair).  */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}
3329
/* CC for 32-bit LOAD NEGATIVE.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}
3334
/* CC for 64-bit LOAD NEGATIVE.  */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}
3339
/* CC for 32-bit LOAD COMPLEMENT.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}
3344
/* CC for 64-bit LOAD COMPLEMENT.  */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}
3349
/* CC zero/non-zero on the low 32 bits of the result.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}
3355
/* CC zero/non-zero on the full 64-bit result.  */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}
3360
/* CC sign test on the low 32 bits of the result.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}
3365
/* CC sign test on the full 64-bit result.  */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}
3370
/* CC for 32-bit signed subtract.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}
3375
/* CC for 64-bit signed subtract.  */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}
3380
/* CC for 32-bit unsigned (logical) subtract.  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}
3385
/* CC for 64-bit unsigned (logical) subtract.  */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}
3390
/* CC for 32-bit subtract with borrow.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}
3395
/* CC for 64-bit subtract with borrow.  */
static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}
3400
/* CC for 32-bit TEST UNDER MASK.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}
3405
/* CC for 64-bit TEST UNDER MASK.  */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3410
3411 /* ====================================================================== */
3412 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3413 with the TCG register to which we will write. Used in combination with
3414 the "wout" generators, in some cases we need a new temporary, and in
3415 some cases we can write to a TCG global. */
3416
/* Output goes into a fresh temporary.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
3421
/* 128-bit output goes into a fresh pair of temporaries.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
3427
/* Output written directly to general register r1 (a TCG global).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
3433
/* Output written directly to the even/odd register pair r1:r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}
3442
/* Output written directly to floating-point register r1 (a TCG global).  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
3448
/* Output written directly to the extended-FP register pair r1:r1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3457
3458 /* ====================================================================== */
3459 /* The "Write OUTput" generators. These generally perform some non-trivial
3460 copy of data to TCG globals, or to main memory. The trivial cases are
3461 generally handled by having a "prep" generator install the TCG global
3462 as the destination of the operation. */
3463
/* Store the 64-bit result into general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

/* Deposit the low 8 bits of the result into r1, preserving the rest.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

/* Deposit the low 16 bits of the result into r1, preserving the rest.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}

/* Store the result as a 32-bit value into r1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}

/* Store two 32-bit results into the register pair r1 / r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

/* Split the doubleword result across the pair: low 32 bits to r1+1,
   high 32 bits to r1.  Note that o->out is clobbered by the shift.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

/* Store a short (32-bit) FP result into register r1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}

/* Store a long (64-bit) FP result into register r1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
3512
3513 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3514 {
3515 /* ??? Specification exception: r1 must be < 14. */
3516 int f1 = get_field(s->fields, r1);
3517 store_freg(f1, o->out);
3518 store_freg((f1 + 2) & 15, o->out2);
3519 }
3520
/* Store the 32-bit result into r1, but only if r1 and r2 name
   different registers.  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

/* Store the short FP result into register r1, only if r1 != r2.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}

/* Store 1, 2, 4 or 8 bytes of the result to the first-operand
   address previously computed into o->addr1.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}

/* Store 4 bytes of the result to the address held in o->in2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3559
3560 /* ====================================================================== */
3561 /* The "INput 1" generators. These load the first operand to an insn. */
3562
/* First operand: a copy of general register r1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

/* First operand: general register r1 itself (TCG global, not freed).  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

/* First operand: low 32 bits of r1, sign-extended to 64 bits.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

/* First operand: low 32 bits of r1, zero-extended to 64 bits.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

/* First operand: high 32 bits of r1, shifted down into the low half.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}

/* First operand: a copy of the odd register of the r1 pair.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* First operand: low 32 bits of register r1+1, sign-extended.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* First operand: low 32 bits of register r1+1, zero-extended.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}
3614
3615 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3616 {
3617 /* ??? Specification exception: r1 must be even. */
3618 int r1 = get_field(f, r1);
3619 o->in1 = tcg_temp_new_i64();
3620 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3621 }
3622
/* First operand: a copy of general register r2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* First operand: a copy of general register r3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

/* First operand: general register r3 itself (TCG global, not freed).  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

/* First operand: low 32 bits of r3, sign-extended to 64 bits.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

/* First operand: low 32 bits of r3, zero-extended to 64 bits.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

/* First operand: the short (32-bit) FP register r1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

/* First operand: FP register r1 itself (TCG global, not freed).  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
3661
/* I1-slot generator for the extended FP register pair r1 / r1+2.
   Note it fills the OUT fields rather than IN1 — presumably because
   the x1 pair is also the destination of the 128-bit operation, with
   in2_x2_o supplying both source halves via in1/in2.
   NOTE(review): confirm every user of this hook expects out/out2
   here rather than in1/in2.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3670
/* First-operand address: base b1 plus displacement d1 (no index).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

/* First-operand address computed from the second-operand fields
   x2/b2/d2 (x2 may be absent from the format).  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* Memory first operands: compute addr1 and load 1, 2, 4 or 8 bytes
   with the indicated extension into a fresh temporary.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3723
3724 /* ====================================================================== */
3725 /* The "INput 2" generators. These load the second operand to an insn. */
3726
/* Second operand: general register r1 itself (TCG global, not freed).  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}

/* Second operand: low 16 bits of r1, zero-extended.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}

/* Second operand: low 32 bits of r1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}

/* Second operand: a copy of general register r2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
3755
3756 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
3757 {
3758 int r2 = get_field(f, r2);
3759 if (r2 != 0) {
3760 o->in2 = load_reg(r2);
3761 }
3762 }
3763
/* Second operand: low 8 bits of r2, sign-extended.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: low 8 bits of r2, zero-extended.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: low 16 bits of r2, sign-extended.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: low 16 bits of r2, zero-extended.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: a copy of general register r3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

/* Second operand: low 32 bits of r2, sign-extended.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: low 32 bits of r2, zero-extended.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Second operand: the short (32-bit) FP register r2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}

/* Second operand: FP register r2 itself (TCG global, not freed).  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
3815
/* Second-operand generator for an extended FP register pair: fills
   BOTH halves of the x2 pair, r2 into in1 and r2+2 into in2.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r2 must be < 14. */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[(r2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}
3824
/* Second operand: an address formed with register r2 as the base
   and no index or displacement.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}

/* Second operand: the x2/b2/d2 effective address (x2 optional).  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* Second operand: PC-relative address; i2 is counted in halfwords.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

/* Second operand: shift count, with maximum 31 for 32-bit shifts.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

/* Second operand: shift count, with maximum 63 for 64-bit shifts.  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}

/* Memory second operands: compute the x2/b2/d2 address into in2,
   then overwrite in2 with the loaded and extended value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* Memory second operands addressed PC-relative via in2_ri2.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* Immediate second operands, with the indicated truncation applied
   before widening to 64 bits.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
3930
3931 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3932 {
3933 uint64_t i2 = (uint16_t)get_field(f, i2);
3934 o->in2 = tcg_const_i64(i2 << s->insn->data);
3935 }
3936
3937 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3938 {
3939 uint64_t i2 = (uint32_t)get_field(f, i2);
3940 o->in2 = tcg_const_i64(i2 << s->insn->data);
3941 }
3942
3943 /* ====================================================================== */
3944
3945 /* Find opc within the table of insns. This is formulated as a switch
3946 statement so that (1) we get compile-time notice of cut-paste errors
3947 for duplicated opcodes, and (2) the compiler generates the binary
3948 search tree, rather than us having to post-process the table. */
3949
/* C() defines an insn whose insn-specific DATA value is 0;
   D() is the general form carrying an extra data operand.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: one enumerator per insn name,
   used below to index insn_info[].  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
/* Second expansion: build the DisasInsn descriptor for each insn.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef D
/* Third expansion: one case label per opcode, from which the compiler
   builds the binary-search dispatch described above.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4001
/* Extract a field from the insn. The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation. */
4005
/* Extract one operand field described by F from the left-aligned INSN
   into the compressed operand array in O.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* Size 0 marks an unused field slot in the format description.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m; /* sign-extend from bit size-1 */
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4041
4042 /* Lookup the insn at the current PC, extracting the operands into O and
4043 returning the info struct for the insn. Returns NULL for invalid insn. */
4044
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first opcode byte determines the insn length (2, 4 or 6).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Read the whole insn and left-align it in the uint64_t, which is
       the representation extract_field expects.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode. Which we can't do without locating the
       secondary opcode. Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4130
/* Translate a single instruction at s->pc: decode it, then run the
   per-insn in1/in2/prep/op/wout/cout hooks in that order, and return
   the resulting exit status.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter. This includes ILLOPC. */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the old interpreter's is_jmp codes onto ExitStatus.  */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  Fields flagged as
       TCG globals (g_*) are not freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4215
/* Translate a block of guest insns starting at TB->pc into TCG ops.
   When SEARCH_PC is set, also fill in the gen_opc_* side tables that
   map generated ops back to guest pc / cc_op state.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Pad the side tables up to the current op index, then
               record pc, cc_op and icount for this insn.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* Stop before an insn that has a breakpoint on it, and arrange
           for an EXCP_DEBUG exception to be raised below.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-pad the remainder of the side tables.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4354
/* Translate TB without generating the pc-search side tables.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

/* Translate TB, also recording the gen_opc_* tables for state restore.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4364
/* Restore guest psw.addr (and cc_op, when statically known) from the
   gen_opc_* side tables at index PC_POS.  */
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
    cc_op = gen_opc_cc_op[pc_pos];
    /* DYNAMIC/STATIC cc ops carry no new value to write back to env.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}