]> git.proxmox.com Git - qemu.git/blob - target-s390x/translate.c
target-s390: Convert RRBE
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/* Per-translation-block decoder state. */
struct DisasContext {
    struct TranslationBlock *tb;   /* TB currently being translated */
    const DisasInsn *insn;         /* decode-table entry for current insn */
    DisasFields *fields;           /* decoded operand fields of current insn */
    uint64_t pc, next_pc;          /* guest PC of this insn / of its successor */
    enum cc_op cc_op;              /* lazily-tracked condition-code state */
    bool singlestep_enabled;
    int is_jmp;                    /* DISAS_* disposition for ending the TB */
};
60
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;   /* TCG comparison to apply between a and b */
    bool is_64;       /* true: operands in u.s64, false: in u.s32 */
    bool g1;          /* operand a is a TCG global; do not free it */
    bool g2;          /* operand b is a TCG global; do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
72
/* is_jmp value: an exception was generated, the TB must end here. */
#define DISAS_EXCP 4

static void gen_op_calc_cc(DisasContext *s);

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counts of branches that could / could not be inlined. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
81
/* Log a raw instruction word when verbose disas debugging is enabled. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
86
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
99 {
100 int i;
101
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
108 }
109
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
116 }
117 }
118
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
125 }
126 }
127
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
135 }
136 }
137 #endif
138
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
143 }
144 #endif
145
146 cpu_fprintf(f, "\n");
147 }
148
/* TCG globals mirroring fields of CPUS390XState. */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* Lazy condition-code state: cc_op says how to derive the CC from
   cc_src/cc_dst/cc_vr when it is finally needed. */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Names "r0".."r15" and "f0".."f15"; must outlive the TCG globals below. */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* Saved cc_op per generated opcode, for state restore on exceptions. */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
163 void s390x_translate_init(void)
164 {
165 int i;
166
167 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
168 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
169 offsetof(CPUS390XState, psw.addr),
170 "psw_addr");
171 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
172 offsetof(CPUS390XState, psw.mask),
173 "psw_mask");
174
175 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
176 "cc_op");
177 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
178 "cc_src");
179 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
180 "cc_dst");
181 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
182 "cc_vr");
183
184 for (i = 0; i < 16; i++) {
185 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
186 regs[i] = tcg_global_mem_new(TCG_AREG0,
187 offsetof(CPUS390XState, regs[i]),
188 cpu_reg_names[i]);
189 }
190
191 for (i = 0; i < 16; i++) {
192 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
193 fregs[i] = tcg_global_mem_new(TCG_AREG0,
194 offsetof(CPUS390XState, fregs[i].d),
195 cpu_reg_names[i + 16]);
196 }
197
198 /* register helpers */
199 #define GEN_HELPER 2
200 #include "helper.h"
201 }
202
203 static inline TCGv_i64 load_reg(int reg)
204 {
205 TCGv_i64 r = tcg_temp_new_i64();
206 tcg_gen_mov_i64(r, regs[reg]);
207 return r;
208 }
209
210 static inline TCGv_i64 load_freg(int reg)
211 {
212 TCGv_i64 r = tcg_temp_new_i64();
213 tcg_gen_mov_i64(r, fregs[reg]);
214 return r;
215 }
216
/* Return a fresh i32 temporary holding the high (short-float) half of
   FP register REG. */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    /* On 32-bit hosts an i64 is a register pair; grab the high half. */
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    /* Alias the i32 temp as an i64 and shift the float into its low bits. */
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}
227
228 static inline TCGv_i64 load_freg32_i64(int reg)
229 {
230 TCGv_i64 r = tcg_temp_new_i64();
231 tcg_gen_shri_i64(r, fregs[reg], 32);
232 return r;
233 }
234
235 static inline TCGv_i32 load_reg32(int reg)
236 {
237 TCGv_i32 r = tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r, regs[reg]);
239 return r;
240 }
241
242 static inline TCGv_i64 load_reg32_i64(int reg)
243 {
244 TCGv_i64 r = tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r, regs[reg]);
246 return r;
247 }
248
/* Store V into all 64 bits of general register REG. */
static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
253
/* Store V into all 64 bits of FP register REG. */
static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
258
/* Store the i32 V into the low half of GPR REG. */
static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    /* i64 globals are register pairs on 32-bit hosts: write the low word. */
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}
269
/* Store the low 32 bits of the i64 V into the low half of GPR REG. */
static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
275
/* Store the low 32 bits of the i64 V into the HIGH half of GPR REG. */
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
280
/* Store the i32 V into the high (short-float) half of FP register REG. */
static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    /* i64 globals are register pairs on 32-bit hosts: write the high word. */
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}
291
/* Store the low 32 bits of the i64 V into the high half of FP reg REG. */
static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
296
/* Fetch the low half of a 128-bit helper result, which helpers leave
   in env->retxl, into DEST. */
static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
301
/* Synchronize the psw.addr global with the current translation PC. */
static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
307
/* Before emitting an op that may fault in system emulation, make the
   PSW address and CC state in env consistent so the fault handler sees
   correct architectural state.  A no-op in user-only builds. */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}
315
/* Fetch the 2-byte instruction halfword at guest PC. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
320
/* Fetch the 4-byte instruction word at guest PC, zero-extended. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
325
/* Fetch a 6-byte instruction at guest PC: leading halfword in bits
   47..32, trailing word in bits 31..0. */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
330
/* Map the PSW address-space-control bits (cached in the TB flags) to
   the MMU index: 0 = primary, 1 = secondary, 2 = home space. */
static inline int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Access-register mode is not handled here. */
        tcg_abort();
        break;
    }
}
345
346 static void gen_exception(int excp)
347 {
348 TCGv_i32 tmp = tcg_const_i32(excp);
349 gen_helper_exception(cpu_env, tmp);
350 tcg_temp_free_i32(tmp);
351 }
352
353 static void gen_program_exception(DisasContext *s, int code)
354 {
355 TCGv_i32 tmp;
356
357 /* Remember what pgm exeption this was. */
358 tmp = tcg_const_i32(code);
359 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
360 tcg_temp_free_i32(tmp);
361
362 tmp = tcg_const_i32(s->next_pc - s->pc);
363 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
364 tcg_temp_free_i32(tmp);
365
366 /* Advance past instruction. */
367 s->pc = s->next_pc;
368 update_psw_addr(s);
369
370 /* Save off cc. */
371 gen_op_calc_cc(s);
372
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM);
375
376 /* End TB here. */
377 s->is_jmp = DISAS_EXCP;
378 }
379
/* Raise a program exception for an unrecognized opcode.
   NOTE(review): an illegal opcode would normally raise an operation
   exception (PGM_OPERATION) rather than PGM_SPECIFICATION -- confirm
   this choice against the z/Architecture Principles of Operation. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}
384
/* Raise a privileged-operation exception if the TB was translated in
   problem state (PSW P-bit set, as cached in the TB flags). */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
391
/* Compute the effective address base(X2) + base(B2) + D2 into a fresh
   temporary, honoring 31-bit addressing mode.  Register number 0 means
   "no register" for both X2 and B2, per the architecture. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
429
/* Set the CC to the constant VAL (0..3) via the CC_OP_CONSTn states. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}
434
/* Track a one-operand (dst only) lazy CC computation of kind OP. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}
442
/* As gen_op_update1_cc_i64, but with a 32-bit operand (zero-extended
   into cc_dst). */
static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}
450
/* Track a two-operand (src, dst) lazy CC computation of kind OP. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}
459
/* As gen_op_update2_cc_i64, but with 32-bit operands (zero-extended). */
static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}
468
/* Track a three-operand (src, dst, result) lazy CC computation of kind OP. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
477
/* Set CC from the zero/non-zero status of the 32-bit VAL. */
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}
482
/* Set CC from the zero/non-zero status of the 64-bit VAL. */
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
487
/* Set CC from a 32-bit float result (zero/negative/positive/NaN class). */
static inline void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
492
/* Set CC from a 64-bit float result. */
static inline void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
497
/* Set CC from a 128-bit float result, passed as high/low halves. */
static inline void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
502
/* Record a 32-bit comparison of V1 and V2 as the lazy CC; COND selects
   the signed/unsigned flavor. */
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}
508
/* Record a 64-bit comparison of V1 and V2 as the lazy CC. */
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}
514
/* Signed 32-bit compare. */
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}
519
/* Unsigned 32-bit compare. */
static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}
524
525 static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
526 {
527 /* XXX optimize for the constant? put it in s? */
528 TCGv_i32 tmp = tcg_const_i32(v2);
529 cmp_32(s, v1, tmp, CC_OP_LTGT_32);
530 tcg_temp_free_i32(tmp);
531 }
532
533 static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
534 {
535 TCGv_i32 tmp = tcg_const_i32(v2);
536 cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
537 tcg_temp_free_i32(tmp);
538 }
539
/* Signed 64-bit compare. */
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}
544
/* Unsigned 64-bit compare. */
static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}
549
550 static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
551 {
552 TCGv_i64 tmp = tcg_const_i64(v2);
553 cmp_s64(s, v1, tmp);
554 tcg_temp_free_i64(tmp);
555 }
556
557 static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
558 {
559 TCGv_i64 tmp = tcg_const_i64(v2);
560 cmp_u64(s, v1, tmp);
561 tcg_temp_free_i64(tmp);
562 }
563
/* Set CC from comparing the signed 32-bit VAL against zero. */
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}
568
/* Set CC from comparing the signed 64-bit VAL against zero. */
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}
573
/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    /* The helper inputs are dead: the CC is already materialized. */
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}
582
/* Write the compile-time-known cc_op into the runtime cc_op global.
   DYNAMIC/STATIC need no store: the global is already up to date. */
static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
589
/* Flush the tracked cc_op state to the runtime global (alias of
   gen_op_set_cc_op, kept for naming parity with other targets). */
static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
594
/* calculates cc into cc_op */
/* Materialize the lazily-tracked condition code into the cc_op global
   (dispatching on how many source operands the cc_op kind consumes),
   then mark the CC state as static. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
669
/* Decode an RR-format instruction: two register numbers in the low byte. */
static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}
677
/* Decode an RX-format instruction and return its computed effective
   address (caller frees the temporary). */
static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;

    return get_address(s, *x2, *b2, *d2);
}
690
/* Decode an RS-format instruction into its register and base/displacement
   fields (no address computation). */
static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}
702
/* Decode an SI-format instruction (immediate + base/displacement) and
   return its computed effective address (caller frees the temporary). */
static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}
714
715 static int use_goto_tb(DisasContext *s, uint64_t dest)
716 {
717 /* NOTE: we handle the case where the TB spans two pages here */
718 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
719 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
720 && !s->singlestep_enabled
721 && !(s->tb->cflags & CF_LAST_IO));
722 }
723
724 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
725 {
726 gen_update_cc_op(s);
727
728 if (use_goto_tb(s, pc)) {
729 tcg_gen_goto_tb(tb_num);
730 tcg_gen_movi_i64(psw_addr, pc);
731 tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
732 } else {
733 /* jump to another page: currently not optimized */
734 tcg_gen_movi_i64(psw_addr, pc);
735 tcg_gen_exit_tb(0);
736 }
737 }
738
/* Count a branch that had to fall back to the generic CC computation. */
static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
745
/* Count a branch that was folded into a direct TCG comparison. */
static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
752
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.
   Indexed by the 4-bit branch mask; mask bit 8 = cc0 (EQ),
   4 = cc1 (LT), 2 = cc2 (GT), 1 = cc3. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
766
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible, so the low two mask
   bits are don't-cares. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
779
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  MASK is the 4-bit
   branch mask (bit value 8 selects cc0 ... bit value 1 selects cc3).
   Whenever the mask + cc_op pair maps onto a single TCG condition the
   branch is inlined on the raw cc operands; otherwise the CC is
   materialized first and tested directly. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        /* Branch-always / branch-never need no operands. */
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:                 /* all selected bits zero */
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:         /* some selected bit set */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:                 /* all inserted bytes zero */
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The CC value is already in the cc_op global; pick a direct
           test of it for each mask shape. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
1002
1003 static void free_compare(DisasCompare *c)
1004 {
1005 if (!c->g1) {
1006 if (c->is_64) {
1007 tcg_temp_free_i64(c->u.s64.a);
1008 } else {
1009 tcg_temp_free_i32(c->u.s32.a);
1010 }
1011 }
1012 if (!c->g2) {
1013 if (c->is_64) {
1014 tcg_temp_free_i64(c->u.s64.b);
1015 } else {
1016 tcg_temp_free_i32(c->u.s32.b);
1017 }
1018 }
1019 }
1020
/* Disassemble one 0xb2-prefixed opcode.  OP is the second opcode byte,
   INSN the full 4-byte instruction word.  Everything handled here is a
   privileged system-emulation instruction; in user-only builds any
   0xb2 opcode reaching this function is treated as illegal. */
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
                     uint32_t insn)
{
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2;
    int r1, r2;
    int r3, d2, b2;

    r1 = (insn >> 4) & 0xf;
    r2 = insn & 0xf;

    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);

    switch (op) {
    case 0x34: /* STCH ? */
        /* Store Subchannel */
        check_privileged(s);
        /* No channel subsystem is modelled: always report cc 3. */
        gen_op_movi_cc(s, 3);
        break;
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call */
        check_privileged(s);
        /* Not implemented: report cc 3. */
        gen_op_movi_cc(s, 3);
        break;
    case 0x78: /* STCKE D2(B2) [S] */
        /* Store Clock Extended */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x79: /* SACF D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->pc = s->next_pc;
        update_psw_addr(s);
        s->is_jmp = DISAS_JUMP;
        break;
    case 0x7d: /* STSI D2,(B2) [S] */
        /* Store System Information; function selectors come in r0/r1. */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0xb1: /* STFL D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        /* NOTE(review): facility bits are hard-coded to 0xc0000000 and
           stored at fixed address 200 -- presumably the architected
           low-core facility-list location; confirm against the PoO. */
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0xb2: /* LPSWE D2(B2) [S] */
        /* Load PSW Extended */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        break;
    case 0x20: /* SERVC R1,R2 [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        tmp = load_reg(r1);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    default:
#endif
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
#ifndef CONFIG_USER_ONLY
        break;
    }
#endif
}
1151
1152 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
1153 {
1154 unsigned char opc;
1155 uint64_t insn;
1156 int op;
1157
1158 opc = cpu_ldub_code(env, s->pc);
1159 LOG_DISAS("opc 0x%x\n", opc);
1160
1161 switch (opc) {
1162 case 0xb2:
1163 insn = ld_code4(env, s->pc);
1164 op = (insn >> 16) & 0xff;
1165 disas_b2(env, s, op, insn);
1166 break;
1167 default:
1168 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
1169 gen_illegal_opcode(s);
1170 break;
1171 }
1172 }
1173
/* ====================================================================== */
/* Define the insn format enumeration.  Each F<n> macro maps an entry of
   insn-format.def onto a FMT_* enumerator; the per-field arguments are
   ignored here and reused below to build the field layout tables. */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
1193
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* "Original" identities of instruction fields, used in presence bitmaps. */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
1222
/* Compact field indices.  Fields that share a slot never appear in the
   same instruction format, so they may safely overlap in DisasFields.c[].  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1253
/* All fields decoded from one instruction.  presentO/presentC are
   availability bitmaps indexed by the O/C enums above; c[] holds the
   raw field values at their compact indices.  */
struct DisasFields {
    unsigned op:8;        /* major opcode byte */
    unsigned op2:8;       /* sub-opcode, when the format has one */
    unsigned presentC:16; /* bitmap over compact indices */
    unsigned int presentO; /* bitmap over original indices */
    int c[NUM_C_FIELD];
};
1261
/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F) have_field1((S), FLD_O_##F)
#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)

/* Return true if original-index field C was decoded for this insn.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch the value of a decoded field; asserts that the field is
   actually present (i.e. the format really has it).  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1277
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;    /* first bit of the field within the insn */
    unsigned int size:8;   /* width in bits */
    unsigned int type:2;   /* 0 = unsigned, 1 = signed, 2 = 20-bit displacement */
    unsigned int indexC:6; /* compact storage slot */
    enum DisasFieldIndexO indexO:8; /* original index, for presentO */
} DisasField;

/* Per-format list of field extractors; unused slots are zero.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1290
/* Field-descriptor helper macros: each expands to one or more DisasField
   initializers.  B is the bit offset of the field within the insn.  */
#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

/* Second expansion of insn-format.def: this time each F<n> macro keeps
   its field descriptors, building one DisasFormatInfo per format,
   indexed by the DisasFormat enum defined above.  */
#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1331
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    /* g_* flags: the corresponding TCGv is a global (e.g. a cpu register)
       and must not be freed or clobbered by the operation.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2; /* out2 holds the low half of 128-bit results */
    TCGv_i64 addr1;               /* effective address for storage operands */
} DisasOps;
1340
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
1358
/* The architectural facility an instruction belongs to; used to gate
   decoding on the emulated CPU model.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1381
/* One entry of the instruction table: identity, format, required
   facility, and the pipeline of helpers that implement it (operand
   load -> prep -> op -> cc -> writeback).  Unused helpers are NULL.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode (including sub-opcode where present) */
    DisasFormat fmt:6;
    DisasFacility fac:6;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* per-insn constant, meaning is op-specific */
};
1398
1399 /* ====================================================================== */
1400 /* Miscelaneous helpers, used by several operations. */
1401
1402 static void help_l2_shift(DisasContext *s, DisasFields *f,
1403 DisasOps *o, int mask)
1404 {
1405 int b2 = get_field(f, b2);
1406 int d2 = get_field(f, d2);
1407
1408 if (b2 == 0) {
1409 o->in2 = tcg_const_i64(d2 & mask);
1410 } else {
1411 o->in2 = get_address(s, 0, b2, d2);
1412 tcg_gen_andi_i64(o->in2, o->in2, mask);
1413 }
1414 }
1415
1416 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1417 {
1418 if (dest == s->next_pc) {
1419 return NO_EXIT;
1420 }
1421 if (use_goto_tb(s, dest)) {
1422 gen_update_cc_op(s);
1423 tcg_gen_goto_tb(0);
1424 tcg_gen_movi_i64(psw_addr, dest);
1425 tcg_gen_exit_tb((tcg_target_long)s->tb);
1426 return EXIT_GOTO_TB;
1427 } else {
1428 tcg_gen_movi_i64(psw_addr, dest);
1429 return EXIT_PC_UPDATED;
1430 }
1431 }
1432
/* Emit a conditional branch, either to an immediate target
   (s->pc + 2*IMM, when IS_IMM) or to the register target CDEST.
   C describes the branch condition and is consumed (freed) on all
   paths.  Chooses between goto_tb chaining, a brcond with direct
   exits, or a movcond PC update depending on what is permitted.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* 32-bit compare: widen the setcond result so a single
               64-bit movcond can select the new PC.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1557
1558 /* ====================================================================== */
1559 /* The operations. These perform the bulk of the work for any insn,
1560 usually after the operands have been loaded and output initialized. */
1561
/* LOAD POSITIVE (integer): out = |in2|.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (long BFP): clear the sign bit of the 64-bit value.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (extended BFP): sign bit is in the high doubleword.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* ADD: out = in1 + in2 (CC is computed by the cout helper).  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1592
1593 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1594 {
1595 TCGv_i64 cc;
1596
1597 tcg_gen_add_i64(o->out, o->in1, o->in2);
1598
1599 /* XXX possible optimization point */
1600 gen_op_calc_cc(s);
1601 cc = tcg_temp_new_i64();
1602 tcg_gen_extu_i32_i64(cc, cc_op);
1603 tcg_gen_shri_i64(cc, cc, 1);
1604
1605 tcg_gen_add_i64(o->out, o->out, cc);
1606 tcg_temp_free_i64(cc);
1607 return NO_EXIT;
1608 }
1609
/* ADD (short BFP): helper handles rounding and IEEE exceptions.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (long BFP).  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (extended BFP): 128-bit operands are passed as hi/lo pairs;
   the helper returns the low half via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1634
/* AND IMMEDIATE (NIHH et al.): AND a sub-field of in1 with the
   immediate in in2.  insn->data encodes shift (low byte) and field
   size in bits (high byte).  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* Build (imm << shift) | ~mask so that bits outside the field
       pass through unchanged; o->in2 is modified in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1651
1652 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1653 {
1654 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1655 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1656 tcg_gen_mov_i64(psw_addr, o->in2);
1657 return EXIT_PC_UPDATED;
1658 } else {
1659 return NO_EXIT;
1660 }
1661 }
1662
/* BRANCH RELATIVE AND SAVE: save link info, branch to PC + 2*I2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* BRANCH ON CONDITION: M1 selects which CC values branch; the target
   is either relative immediate (i2) or register (in2).  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1679
/* BRANCH ON COUNT (32-bit): decrement the low half of R1 and branch
   if the result is nonzero.  The decrement happens unconditionally
   before the compare is set up.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON COUNT (64-bit): decrement R1 in place and branch if
   nonzero.  g1 is set because the compare operand is the global reg.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1722
/* COMPARE (short BFP): the helper returns the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (long BFP).  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (extended BFP): operands are hi/lo 64-bit pairs.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1743
/* CONVERT TO FIXED (BFP -> 32/64-bit int) family.  M3 is the rounding
   mode; the CC reflects the source value (nz / special).  */

/* short BFP -> 32-bit int */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* long BFP -> 32-bit int */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* extended BFP -> 32-bit int */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* short BFP -> 64-bit int */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* long BFP -> 64-bit int */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* extended BFP -> 64-bit int */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1797
/* CONVERT FROM FIXED (64-bit int -> BFP) family.  M3 is the rounding
   mode; these do not set the CC.  */

/* int -> short BFP */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* int -> long BFP */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* int -> extended BFP; low half comes back via return_low128 */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1822
/* CHECKSUM: the helper computes the checksum over the R2/R2+1 operand
   and returns the number of bytes processed in LEN; the accumulated
   checksum arrives via the 128-bit return (return_low128).  R2 and
   R2+1 are then advanced/decremented by LEN.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1839
/* COMPARE LOGICAL (character): inline the power-of-two lengths as
   direct loads plus an unsigned compare; anything else goes through
   the byte-wise helper.  l1 is the length minus one.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Odd length: let the helper do the loop and set the CC.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1873
/* COMPARE LOGICAL LONG EXTENDED: register pair numbers go to the
   helper, which updates the pairs and produces the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE LOGICAL CHARACTERS UNDER MASK: M3 selects which bytes of
   the (32-bit) first operand participate.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1898
/* COMPARE LOGICAL STRING: terminator byte is in r0; the helper
   returns updated addresses (second one via return_low128).  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* COMPARE AND SWAP (32-bit): R3 holds the replacement value.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE AND SWAP (64-bit).  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
1925
/* COMPARE DOUBLE AND SWAP (two 32-bit regs): assemble the even/odd
   register pair R3/R3+1 into one 64-bit value and reuse the 64-bit
   compare-and-swap helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}
1937
1938 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1939 {
1940 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1941 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1942 potential_page_fault(s);
1943 /* XXX rewrite in tcg */
1944 gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
1945 set_cc_static(s);
1946 return NO_EXIT;
1947 }
1948
1949 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1950 {
1951 TCGv_i64 t1 = tcg_temp_new_i64();
1952 TCGv_i32 t2 = tcg_temp_new_i32();
1953 tcg_gen_trunc_i64_i32(t2, o->in1);
1954 gen_helper_cvd(t1, t2);
1955 tcg_temp_free_i32(t2);
1956 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1957 tcg_temp_free_i64(t1);
1958 return NO_EXIT;
1959 }
1960
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (KVM hypercall interface): privileged.  The function code
   comes from the displacement field; r1/r2 carry the arguments.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
#endif
1976
/* DIVIDE family: the helpers return quotient and remainder as a
   128-bit pair (second half via return_low128).  */

/* signed 64 / 32 -> 32-bit quotient/remainder */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* unsigned 64 / 32 */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* signed 64 / 64 */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* unsigned 128 / 64: the dividend is the out/out2 register pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2004
/* DIVIDE (short BFP).  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (long BFP).  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (extended BFP): hi/lo pairs in and out.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2023
/* EXTRACT ACCESS REGISTER: read access register R2 into the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT FPC: read the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2036
/* EXECUTE: run the target instruction with its second byte OR'd with
   the low byte of R1, via a helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    /* The helper needs a correct PSW and CC to interpret from.  */
    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2062
/* FIND LEFTMOST ONE: R1 = leading-zero count (64 if none), R1+1 = the
   input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2082
2083 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2084 {
2085 int m3 = get_field(s->fields, m3);
2086 int pos, len, base = s->insn->data;
2087 TCGv_i64 tmp = tcg_temp_new_i64();
2088 uint64_t ccm;
2089
2090 switch (m3) {
2091 case 0xf:
2092 /* Effectively a 32-bit load. */
2093 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2094 len = 32;
2095 goto one_insert;
2096
2097 case 0xc:
2098 case 0x6:
2099 case 0x3:
2100 /* Effectively a 16-bit load. */
2101 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2102 len = 16;
2103 goto one_insert;
2104
2105 case 0x8:
2106 case 0x4:
2107 case 0x2:
2108 case 0x1:
2109 /* Effectively an 8-bit load. */
2110 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2111 len = 8;
2112 goto one_insert;
2113
2114 one_insert:
2115 pos = base + ctz32(m3) * 8;
2116 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2117 ccm = ((1ull << len) - 1) << pos;
2118 break;
2119
2120 default:
2121 /* This is going to be a sequence of loads and inserts. */
2122 pos = base + 32 - 8;
2123 ccm = 0;
2124 while (m3) {
2125 if (m3 & 0x8) {
2126 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2127 tcg_gen_addi_i64(o->in2, o->in2, 1);
2128 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2129 ccm |= 0xff << pos;
2130 }
2131 m3 = (m3 << 1) & 0xf;
2132 pos -= 8;
2133 }
2134 break;
2135 }
2136
2137 tcg_gen_movi_i64(tmp, ccm);
2138 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2139 tcg_temp_free_i64(tmp);
2140 return NO_EXIT;
2141 }
2142
2143 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2144 {
2145 int shift = s->insn->data & 0xff;
2146 int size = s->insn->data >> 8;
2147 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2148 return NO_EXIT;
2149 }
2150
/* INSERT PROGRAM MASK: place the program mask (PSW bits) and the
   condition code into bits 24-31 of the output register.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    /* Clear the byte we are about to build.  */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the 4-bit program mask from the PSW into bits 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Place the CC into bits 28-29 (shift by 28 within the register).  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2169
#ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY: privileged TLB maintenance.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED: privileged; read the storage key for
   the address in in2.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
2185
/* LOAD LENGTHENED short -> long BFP.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED long -> short BFP.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED extended -> long BFP.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED extended -> short BFP.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD LENGTHENED long -> extended BFP.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LENGTHENED short -> extended BFP.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LOGICAL THIRTY ONE BITS: keep only bits 33-63.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2229
/* Memory loads of various widths/extensions; address is in in2.  */

/* 8-bit, sign-extended.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* 8-bit, zero-extended.  */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* 16-bit, sign-extended.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* 16-bit, zero-extended.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* 32-bit, sign-extended.  */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* 32-bit, zero-extended.  */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* 64-bit.  */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2271
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; load control regs R1..R3 from
   memory at in2.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit): privileged.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS: privileged; translate the virtual address in in2
   and set the CC from the translation result.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PSW (ESA 8-byte form): privileged; load mask and address words
   and install them as the new PSW.  Ends the TB (noreturn helper).  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
2324
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from the address
   in in2 via a helper that may fault.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2335
2336 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2337 {
2338 int r1 = get_field(s->fields, r1);
2339 int r3 = get_field(s->fields, r3);
2340 TCGv_i64 t = tcg_temp_new_i64();
2341 TCGv_i64 t4 = tcg_const_i64(4);
2342
2343 while (1) {
2344 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2345 store_reg32_i64(r1, t);
2346 if (r1 == r3) {
2347 break;
2348 }
2349 tcg_gen_add_i64(o->in2, o->in2, t4);
2350 r1 = (r1 + 1) & 15;
2351 }
2352
2353 tcg_temp_free_i64(t);
2354 tcg_temp_free_i64(t4);
2355 return NO_EXIT;
2356 }
2357
2358 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2359 {
2360 int r1 = get_field(s->fields, r1);
2361 int r3 = get_field(s->fields, r3);
2362 TCGv_i64 t = tcg_temp_new_i64();
2363 TCGv_i64 t4 = tcg_const_i64(4);
2364
2365 while (1) {
2366 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2367 store_reg32h_i64(r1, t);
2368 if (r1 == r3) {
2369 break;
2370 }
2371 tcg_gen_add_i64(o->in2, o->in2, t4);
2372 r1 = (r1 + 1) & 15;
2373 }
2374
2375 tcg_temp_free_i64(t);
2376 tcg_temp_free_i64(t4);
2377 return NO_EXIT;
2378 }
2379
2380 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2381 {
2382 int r1 = get_field(s->fields, r1);
2383 int r3 = get_field(s->fields, r3);
2384 TCGv_i64 t8 = tcg_const_i64(8);
2385
2386 while (1) {
2387 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2388 if (r1 == r3) {
2389 break;
2390 }
2391 tcg_gen_add_i64(o->in2, o->in2, t8);
2392 r1 = (r1 + 1) & 15;
2393 }
2394
2395 tcg_temp_free_i64(t8);
2396 return NO_EXIT;
2397 }
2398
/* Move in2 to out by transferring ownership of the TCG value: in2 is
   marked unused and its global flag cleared so the value is released
   through out only.  NOTE(review): ownership semantics inferred from
   the g_* flags; confirm against the cleanup in the dispatch loop.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* Move the in1:in2 pair to out:out2, transferring ownership of both
   halves together with their global flags.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2419
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: operands are described by the r1 and r2 register pairs;
   the helper returns the condition code.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as op_mvcl but with the r3 pair and the padding
   byte taken from in2.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2452
2453 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged; the key/length operand comes from the
   register selected by the l1 field.  Condition code from helper.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: privileged; mirror image of op_mvcp.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2473 #endif
2474
/* MOVE PAGE: helper also consumes r0 for its control bits; condition
   code comes from the helper.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: terminator byte in r0; the helper returns the updated
   first address in in1 and the second via return_low128(in2).  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2491
/* Integer multiply, low 64 bits of the product.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* 64x64 -> 128 multiply; high half in out, low half via return_low128.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* float32 multiply.  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* float32 operands, float64 product.  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* float64 multiply.  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* float128 multiply: both 128-bit operands are passed as 64-bit pairs
   and the 128-bit product comes back split across out/out2.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* float64 operand widened against the float128 pair in out/out2.  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* float32 multiply-and-add: out = in1 * in2 + f(r3).  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* float64 multiply-and-add: out = in1 * in2 + f(r3).  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* float32 multiply-and-subtract.  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* float64 multiply-and-subtract.  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2566
/* LOAD NEGATIVE (integer): out = -|in2| via helper.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD NEGATIVE (float32): unconditionally set the sign bit of the
   32-bit float image.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (float64): unconditionally set the sign bit.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (float128): sign bit lives in the high doubleword;
   the low doubleword is copied through unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* AND (character): l1+1 bytes at addr1 ANDed with bytes at in2;
   condition code from helper.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (integer): two's complement negation.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (float32): flip the sign bit.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (float64): flip the sign bit.  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (float128): flip the sign bit in the high
   doubleword, copy the low doubleword through.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2626
/* OR (character): l1+1 bytes at addr1 ORed with bytes at in2;
   condition code from helper.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* Bitwise OR of two 64-bit operands.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR immediate into a sub-field of the register: insn->data encodes
   the field's bit offset (low byte) and width (high bits).  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2658
2659 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; the helper flushes the translation buffer.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2666 #endif
2667
/* Byte-swap the low 16 bits of in2.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap the low 32 bits of in2.  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap all 64 bits of in2.  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2685
2686 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2687 {
2688 TCGv_i32 t1 = tcg_temp_new_i32();
2689 TCGv_i32 t2 = tcg_temp_new_i32();
2690 TCGv_i32 to = tcg_temp_new_i32();
2691 tcg_gen_trunc_i64_i32(t1, o->in1);
2692 tcg_gen_trunc_i64_i32(t2, o->in2);
2693 tcg_gen_rotl_i32(to, t1, t2);
2694 tcg_gen_extu_i32_i64(o->out, to);
2695 tcg_temp_free_i32(t1);
2696 tcg_temp_free_i32(t2);
2697 tcg_temp_free_i32(to);
2698 return NO_EXIT;
2699 }
2700
/* ROTATE LEFT (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2706
2707 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper operates on the
   storage key for the address in in2 and returns the condition code.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2715 #endif
2716
/* SET ACCESS: store the low 32 bits of in2 into access register r1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}

/* float32 subtract.  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* float64 subtract.  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* float128 subtract: 128-bit operands passed as 64-bit pairs, result
   returned split across out/out2.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* float32 square root.  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* float64 square root.  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* float128 square root.  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2761
2762 #ifndef CONFIG_USER_ONLY
/* SIGNAL PROCESSOR: privileged; the helper performs the SIGP order and
   produces the condition code.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2772 #endif
2773
/* SHIFT LEFT SINGLE (arithmetic): insn->data gives the sign-bit index
   (31 or 63) and selects the matching CC computation.  The CC is
   recorded from the ORIGINAL operands before in1 is clobbered below.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
2787
/* Logical shift left.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Arithmetic shift right.  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Logical shift right.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SET FPC: install a new floating-point control register via helper.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
2811
2812 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; bits of in2 become the 4-bit
   PSW key, deposited into psw_mask at the key position.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged; helper sets the storage key
   for the address in in2 from in1.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace the top byte of the PSW mask
   with the low byte of in2.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CPU ADDRESS: privileged.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested.  */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
2844
/* STORE CLOCK: read the TOD clock via helper.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* SET CLOCK COMPARATOR: privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit): privileged; store control registers r1..r3
   to the address in in2 via helper.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit): privileged; as op_stctg with word values.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CPU ID: privileged; reads cpu_num like op_stap above.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* SET CPU TIMER: privileged.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CPU TIMER: privileged.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}

/* SET PREFIX: privileged.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE PREFIX: privileged; mask the prefix register to its
   architected alignment before returning it.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}

/* STORE THEN AND/OR SYSTEM MASK: privileged; opcode 0xac selects the
   AND form, otherwise OR.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
2950 #endif
2951
/* Store the low byte of in1 at the address in in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low 16 bits of in1 at the address in in2.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low 32 bits of in1 at the address in in2.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store all 64 bits of in1 at the address in in2.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* STORE ACCESS MULTIPLE: store access registers r1..r3 via helper.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2986
/* STORE CHARACTERS UNDER MASK: each set bit of m3 selects one byte of
   the in1 field (insn->data is the bit offset of that field, 0 or 32)
   to be stored at successive bytes of in2.  Contiguous masks collapse
   into a single wider store; anything else becomes a byte loop.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the lowest-numbered (leftmost) selected byte.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3035
3036 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3037 {
3038 int r1 = get_field(s->fields, r1);
3039 int r3 = get_field(s->fields, r3);
3040 int size = s->insn->data;
3041 TCGv_i64 tsize = tcg_const_i64(size);
3042
3043 while (1) {
3044 if (size == 8) {
3045 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3046 } else {
3047 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3048 }
3049 if (r1 == r3) {
3050 break;
3051 }
3052 tcg_gen_add_i64(o->in2, o->in2, tsize);
3053 r1 = (r1 + 1) & 15;
3054 }
3055
3056 tcg_temp_free_i64(tsize);
3057 return NO_EXIT;
3058 }
3059
3060 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3061 {
3062 int r1 = get_field(s->fields, r1);
3063 int r3 = get_field(s->fields, r3);
3064 TCGv_i64 t = tcg_temp_new_i64();
3065 TCGv_i64 t4 = tcg_const_i64(4);
3066 TCGv_i64 t32 = tcg_const_i64(32);
3067
3068 while (1) {
3069 tcg_gen_shl_i64(t, regs[r1], t32);
3070 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3071 if (r1 == r3) {
3072 break;
3073 }
3074 tcg_gen_add_i64(o->in2, o->in2, t4);
3075 r1 = (r1 + 1) & 15;
3076 }
3077
3078 tcg_temp_free_i64(t);
3079 tcg_temp_free_i64(t4);
3080 tcg_temp_free_i64(t32);
3081 return NO_EXIT;
3082 }
3083
/* SEARCH STRING: character to find in r0; the helper returns the
   updated addresses in in1 and (via return_low128) in2, and sets the
   condition code.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* Integer subtract.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: computed as in1 + ~in2 + carry, where the
   carry is bit 1 of the current condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point.  */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Extract the carry flag from bit 1 of the cc value.  */
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3116
/* SUPERVISOR CALL: record the SVC interruption code and instruction
   length in the CPU state, then raise the SVC exception.  The PSW and
   condition code must be made current first, since the exception exits
   the translation block.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3135
/* TEST DATA CLASS (float32): condition code from helper.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (float64).  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (float128): the 128-bit operand is in out/out2.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3156
3157 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: condition code from helper.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3165 #endif
3166
/* TRANSLATE: replace l1+1 bytes at addr1 using the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK: expand the packed-decimal field at in2 into addr1.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* EXCLUSIVE OR (character): l1+1 bytes at addr1 XORed with bytes at
   in2; condition code from helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3195
/* Bitwise XOR of two 64-bit operands.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* XOR immediate into a sub-field of the register: insn->data encodes
   the field's bit offset (low byte) and width (high bits).  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* Produce a constant zero output.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a constant zero output pair.  out2 aliases out; g_out2 is
   set so the shared value is not released twice.  NOTE(review):
   inferred from the g_* flags; confirm against the cleanup logic.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3231
3232 /* ====================================================================== */
3233 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3234 the original inputs), update the various cc data structures in order to
3235 be able to compute the new condition code. */
3236
/* Record CC inputs for the 32-bit absolute-value result.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed add: CC needs both inputs and the result.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* Unsigned add (logical).  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* Add with carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Signed compare.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* Unsigned compare.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* Floating-point results: CC derived from the result's class.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Nonzero test on the low 32 bits; cc_dst doubles as scratch for the
   zero-extension.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Signed comparison of the result against zero.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Signed subtract.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* Unsigned subtract (logical).  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* Subtract with borrow.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* TEST UNDER MASK.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3392
3393 /* ====================================================================== */
3394 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3395 with the TCG register to which we will write. Used in combination with
3396 the "wout" generators, in some cases we need a new temporary, and in
3397 some cases we can write to a TCG global. */
3398
/* Allocate a fresh temporary for the output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

/* Allocate a fresh temporary pair for a 128-bit output.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}

/* Write directly into the r1 general register (a TCG global).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}

/* Write directly into the r1:r1+1 even/odd register pair.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}

/* Write directly into the f1 floating-point register.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}

/* Write directly into the f1:f1+2 float-register pair (float128).  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3439
3440 /* ====================================================================== */
3441 /* The "Write OUTput" generators. These generally perform some non-trivial
3442 copy of data to TCG globals, or to main memory. The trivial cases are
3443 generally handled by having a "prep" generator install the TCG global
3444 as the destination of the operation. */
3445
3446 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3447 {
3448 store_reg(get_field(f, r1), o->out);
3449 }
3450
3451 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3452 {
3453 int r1 = get_field(f, r1);
3454 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3455 }
3456
3457 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3458 {
3459 int r1 = get_field(f, r1);
3460 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3461 }
3462
3463 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3464 {
3465 store_reg32_i64(get_field(f, r1), o->out);
3466 }
3467
3468 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3469 {
3470 /* ??? Specification exception: r1 must be even. */
3471 int r1 = get_field(f, r1);
3472 store_reg32_i64(r1, o->out);
3473 store_reg32_i64((r1 + 1) & 15, o->out2);
3474 }
3475
/* Write a 64-bit result as the even/odd 32-bit register pair r1, r1+1:
   the low half goes to r1+1, the high half to r1.  Note that o->out is
   shifted in place; that is fine because the caller (translate_one)
   frees it immediately after the wout hook runs.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
3484
3485 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3486 {
3487 store_freg32_i64(get_field(f, r1), o->out);
3488 }
3489
3490 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3491 {
3492 store_freg(get_field(f, r1), o->out);
3493 }
3494
3495 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3496 {
3497 /* ??? Specification exception: r1 must be < 14. */
3498 int f1 = get_field(s->fields, r1);
3499 store_freg(f1, o->out);
3500 store_freg((f1 + 2) & 15, o->out2);
3501 }
3502
3503 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3504 {
3505 if (get_field(f, r1) != get_field(f, r2)) {
3506 store_reg32_i64(get_field(f, r1), o->out);
3507 }
3508 }
3509
3510 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3511 {
3512 if (get_field(f, r1) != get_field(f, r2)) {
3513 store_freg32_i64(get_field(f, r1), o->out);
3514 }
3515 }
3516
/* Store the low 8 bits of the result to memory at addr1.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
3521
/* Store the low 16 bits of the result to memory at addr1.  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
3526
/* Store the low 32 bits of the result to memory at addr1.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
3531
/* Store the 64-bit result to memory at addr1.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
3536
/* Store the low 32 bits of the result to memory at the address in in2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3541
3542 /* ====================================================================== */
3543 /* The "INput 1" generators. These load the first operand to an insn. */
3544
3545 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3546 {
3547 o->in1 = load_reg(get_field(f, r1));
3548 }
3549
3550 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3551 {
3552 o->in1 = regs[get_field(f, r1)];
3553 o->g_in1 = true;
3554 }
3555
3556 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3557 {
3558 o->in1 = tcg_temp_new_i64();
3559 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
3560 }
3561
3562 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3563 {
3564 o->in1 = tcg_temp_new_i64();
3565 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
3566 }
3567
3568 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
3569 {
3570 o->in1 = tcg_temp_new_i64();
3571 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
3572 }
3573
3574 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3575 {
3576 /* ??? Specification exception: r1 must be even. */
3577 int r1 = get_field(f, r1);
3578 o->in1 = load_reg((r1 + 1) & 15);
3579 }
3580
3581 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3582 {
3583 /* ??? Specification exception: r1 must be even. */
3584 int r1 = get_field(f, r1);
3585 o->in1 = tcg_temp_new_i64();
3586 tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
3587 }
3588
3589 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3590 {
3591 /* ??? Specification exception: r1 must be even. */
3592 int r1 = get_field(f, r1);
3593 o->in1 = tcg_temp_new_i64();
3594 tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
3595 }
3596
3597 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3598 {
3599 /* ??? Specification exception: r1 must be even. */
3600 int r1 = get_field(f, r1);
3601 o->in1 = tcg_temp_new_i64();
3602 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3603 }
3604
3605 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3606 {
3607 o->in1 = load_reg(get_field(f, r2));
3608 }
3609
3610 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3611 {
3612 o->in1 = load_reg(get_field(f, r3));
3613 }
3614
3615 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
3616 {
3617 o->in1 = regs[get_field(f, r3)];
3618 o->g_in1 = true;
3619 }
3620
3621 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3622 {
3623 o->in1 = tcg_temp_new_i64();
3624 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
3625 }
3626
3627 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3628 {
3629 o->in1 = tcg_temp_new_i64();
3630 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
3631 }
3632
3633 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3634 {
3635 o->in1 = load_freg32_i64(get_field(f, r1));
3636 }
3637
3638 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3639 {
3640 o->in1 = fregs[get_field(f, r1)];
3641 o->g_in1 = true;
3642 }
3643
/* "Input" x1: install the extended-float register pair r1, r1+2.
   NOTE(review): this sets o->out/o->out2 (marked as globals), not
   o->in1 -- presumably the x1 operand is read and written in place by
   the insns that use this generator; confirm against the table
   entries in insn-data.def that reference in1_x1_o.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3652
3653 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
3654 {
3655 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
3656 }
3657
3658 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
3659 {
3660 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3661 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
3662 }
3663
/* First operand: zero-extended byte loaded from the b1/d1 address.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
3670
/* First operand: sign-extended 16-bit load from the b1/d1 address.  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
3677
/* First operand: zero-extended 16-bit load from the b1/d1 address.  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
3684
/* First operand: sign-extended 32-bit load from the b1/d1 address.  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
3691
/* First operand: zero-extended 32-bit load from the b1/d1 address.  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
3698
/* First operand: 64-bit load from the b1/d1 address.  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3705
3706 /* ====================================================================== */
3707 /* The "INput 2" generators. These load the second operand to an insn. */
3708
3709 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3710 {
3711 o->in2 = regs[get_field(f, r1)];
3712 o->g_in2 = true;
3713 }
3714
3715 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3716 {
3717 o->in2 = tcg_temp_new_i64();
3718 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
3719 }
3720
3721 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3722 {
3723 o->in2 = tcg_temp_new_i64();
3724 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
3725 }
3726
3727 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3728 {
3729 o->in2 = load_reg(get_field(f, r2));
3730 }
3731
3732 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3733 {
3734 o->in2 = regs[get_field(f, r2)];
3735 o->g_in2 = true;
3736 }
3737
3738 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
3739 {
3740 int r2 = get_field(f, r2);
3741 if (r2 != 0) {
3742 o->in2 = load_reg(r2);
3743 }
3744 }
3745
3746 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
3747 {
3748 o->in2 = tcg_temp_new_i64();
3749 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
3750 }
3751
3752 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3753 {
3754 o->in2 = tcg_temp_new_i64();
3755 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
3756 }
3757
3758 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3759 {
3760 o->in2 = tcg_temp_new_i64();
3761 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
3762 }
3763
3764 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3765 {
3766 o->in2 = tcg_temp_new_i64();
3767 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
3768 }
3769
3770 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3771 {
3772 o->in2 = load_reg(get_field(f, r3));
3773 }
3774
3775 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3776 {
3777 o->in2 = tcg_temp_new_i64();
3778 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
3779 }
3780
3781 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3782 {
3783 o->in2 = tcg_temp_new_i64();
3784 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
3785 }
3786
3787 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
3788 {
3789 o->in2 = load_freg32_i64(get_field(f, r2));
3790 }
3791
3792 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3793 {
3794 o->in2 = fregs[get_field(f, r2)];
3795 o->g_in2 = true;
3796 }
3797
/* Operands: the extended-float register pair r2, r2+2 used in place,
   the first word in in1 and the second in in2.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r2 must be < 14. */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[(r2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}
3806
3807 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
3808 {
3809 o->in2 = get_address(s, 0, get_field(f, r2), 0);
3810 }
3811
3812 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
3813 {
3814 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3815 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
3816 }
3817
3818 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
3819 {
3820 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
3821 }
3822
3823 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
3824 {
3825 help_l2_shift(s, f, o, 31);
3826 }
3827
3828 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
3829 {
3830 help_l2_shift(s, f, o, 63);
3831 }
3832
/* Second operand: zero-extended byte loaded from the x2/b2/d2 address.
   The address temp in in2 is overwritten by the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
3838
/* Second operand: sign-extended 16-bit load from the x2/b2/d2 address.  */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
3844
/* Second operand: zero-extended 16-bit load from the x2/b2/d2 address.  */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
3850
/* Second operand: sign-extended 32-bit load from the x2/b2/d2 address.  */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
3856
/* Second operand: zero-extended 32-bit load from the x2/b2/d2 address.  */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
3862
/* Second operand: 64-bit load from the x2/b2/d2 address.  */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
3868
/* Second operand: zero-extended 16-bit load from the PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
3874
/* Second operand: sign-extended 32-bit load from the PC-relative address.  */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
3880
/* Second operand: zero-extended 32-bit load from the PC-relative address.  */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
3886
/* Second operand: 64-bit load from the PC-relative address.  */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
3892
3893 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
3894 {
3895 o->in2 = tcg_const_i64(get_field(f, i2));
3896 }
3897
3898 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3899 {
3900 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
3901 }
3902
3903 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3904 {
3905 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
3906 }
3907
3908 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3909 {
3910 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
3911 }
3912
3913 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3914 {
3915 uint64_t i2 = (uint16_t)get_field(f, i2);
3916 o->in2 = tcg_const_i64(i2 << s->insn->data);
3917 }
3918
3919 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3920 {
3921 uint64_t i2 = (uint32_t)get_field(f, i2);
3922 o->in2 = tcg_const_i64(i2 << s->insn->data);
3923 }
3924
3925 /* ====================================================================== */
3926
/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

/* C() is D() with a zero DATA operand; insn-data.def uses both.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* Expansion 1: an enum index, insn_NM, for every table entry.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Expansion 2: the DisasInsn descriptor for each entry, wiring up the
   in1/in2/prep/wout/cout/op helper callbacks by token pasting.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Expansion 3: a switch case per opcode, returning its descriptor.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a 16-bit (major << 8 | minor) opcode to its table entry,
   or NULL if the opcode is not in the table.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
3983
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
3987
/* Decode one operand field described by F from the left-aligned INSN
   and record its value in O.  A zero-size field descriptor is a
   placeholder and is skipped.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* (r ^ m) - m sign-extends from bit (size-1) without a branch. */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The raw field is dh:dl; swap to dh-high and sign-extend dh. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4023
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */
4026
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first halfword's top byte determines the insn length.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the uint64_t, as extract_field expects.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode. Which we can't do without locating the
       secondary opcode. Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Second opcode byte immediately follows the first.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the 4-bit field at bits 12-15.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* SS formats have no secondary opcode.  */
        op2 = 0;
        break;
    default:
        /* Default: secondary opcode byte at bit 40.  */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4112
/* Translate a single instruction at s->pc: decode it, run the table's
   in/prep/op/wout/cout hooks, free operand temporaries, and advance
   s->pc.  Insns not yet in the table fall back to the old
   interpreter-style decoder.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter. This includes ILLOPC. */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy is_jmp result onto the new ExitStatus values.  */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each hook is optional; the fixed
       order is: load inputs, prepare outputs, operate, write outputs,
       then compute the condition code.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  Operands flagged
       g_* are TCG globals (live registers) and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4197
/* Translate a basic block of guest code into TCG ops.  When SEARCH_PC
   is set, also record per-op pc/cc_op info so a faulting host pc can
   be mapped back to guest state (see restore_state_to_opc).  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record pc/cc_op for the first op of this insn; pad any
               gap in the op index table with zeros.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this pc ends the TB before the insn.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the remainder of the op index table.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4336
/* Public entry point: translate TB without pc-search bookkeeping.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
4341
/* Public entry point: translate TB recording per-op pc info, so a
   faulting host pc can be mapped back to guest state.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4346
/* Restore the PSW address (and, when statically known, cc_op) from
   the info recorded at translation time for op index PC_POS.  */
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
    cc_op = gen_opc_cc_op[pc_pos];
    /* DYNAMIC/STATIC mean env->cc_op is already authoritative.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}