]> git.proxmox.com Git - qemu.git/blob - target-s390x/translate.c
target-s390: Convert SSKE
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/* Per-translation state threaded through every translate routine. */
struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;       /* insn currently being translated */
    DisasFields *fields;         /* decoded operand fields of insn */
    uint64_t pc, next_pc;        /* address of current and next insn */
    enum cc_op cc_op;            /* lazily tracked condition-code state */
    bool singlestep_enabled;
    int is_jmp;                  /* how/why translation should stop */
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;                  /* true: use u.s64, false: use u.s32 */
    bool g1;                     /* operand a is a global; do not free */
    bool g2;                     /* operand b is a global; do not free */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
72
73 #define DISAS_EXCP 4
74
75 static void gen_op_calc_cc(DisasContext *s);
76
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
80 #endif
81
82 static inline void debug_insn(uint64_t insn)
83 {
84 LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
85 }
86
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
99 {
100 int i;
101
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
108 }
109
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
116 }
117 }
118
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
125 }
126 }
127
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
135 }
136 }
137 #endif
138
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
143 }
144 #endif
145
146 cpu_fprintf(f, "\n");
147 }
148
149 static TCGv_i64 psw_addr;
150 static TCGv_i64 psw_mask;
151
152 static TCGv_i32 cc_op;
153 static TCGv_i64 cc_src;
154 static TCGv_i64 cc_dst;
155 static TCGv_i64 cc_vr;
156
157 static char cpu_reg_names[32][4];
158 static TCGv_i64 regs[16];
159 static TCGv_i64 fregs[16];
160
161 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
/* Create the TCG globals that mirror the guest CPU state: the PSW,
   the cc computation inputs, and the 16 general and 16 floating-point
   registers.  Called once at startup.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* Register names live in cpu_reg_names: r0..r15 then f0..f15.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
202
203 static inline TCGv_i64 load_reg(int reg)
204 {
205 TCGv_i64 r = tcg_temp_new_i64();
206 tcg_gen_mov_i64(r, regs[reg]);
207 return r;
208 }
209
/* Return a fresh temporary holding a copy of fp register REG. */
static inline TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, fregs[reg]);
    return r;
}

/* Return a fresh i32 temporary holding the upper 32 bits of fp
   register REG (where the short-format value lives; see store_freg32).
   On 32-bit hosts the high sub-register is copied directly.  */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}
227
228 static inline TCGv_i64 load_freg32_i64(int reg)
229 {
230 TCGv_i64 r = tcg_temp_new_i64();
231 tcg_gen_shri_i64(r, fregs[reg], 32);
232 return r;
233 }
234
235 static inline TCGv_i32 load_reg32(int reg)
236 {
237 TCGv_i32 r = tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r, regs[reg]);
239 return r;
240 }
241
242 static inline TCGv_i64 load_reg32_i64(int reg)
243 {
244 TCGv_i64 r = tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r, regs[reg]);
246 return r;
247 }
248
/* Write V to all 64 bits of general register REG. */
static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Write V to all 64 bits of fp register REG. */
static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
258
/* Write 32-bit V to the low half of general register REG. */
static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}

/* Write the low 32 bits of i64 V to the low half of register REG. */
static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Write the low 32 bits of i64 V to the HIGH half of register REG. */
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Write 32-bit V to the upper half of fp register REG, where the
   short (32-bit) float format is kept. */
static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}

/* As store_freg32, but the value arrives in the low half of i64 V. */
static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
296
/* Fetch the low 64 bits of a 128-bit helper result, which helpers
   stash in env->retxl, into DEST. */
static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

/* Synchronize the PSW address global with the translator's pc. */
static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Before an operation that may fault in system mode, make psw.addr
   and cc_op consistent so the fault handler sees correct state. */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}
315
/* Fetch a 2-byte instruction halfword at PC, zero-extended. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch a 4-byte instruction word at PC, zero-extended. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch a 6-byte instruction at PC into the low 48 bits. */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
330
/* Map the PSW address-space-control bits (cached in tb->flags) to the
   softmmu memory index: 0 primary, 1 secondary, 2 home. */
static inline int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        tcg_abort();
        break;
    }
}
345
346 static void gen_exception(int excp)
347 {
348 TCGv_i32 tmp = tcg_const_i32(excp);
349 gen_helper_exception(cpu_env, tmp);
350 tcg_temp_free_i32(tmp);
351 }
352
/* Raise a program exception with interruption code CODE: record the
   code and instruction length in env, advance the PSW past the
   faulting instruction, flush the cc state, and end the TB. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Record the instruction length for the ILC field.  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    gen_op_calc_cc(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);

    /* End TB here.  */
    s->is_jmp = DISAS_EXCP;
}
379
/* Raise the exception used for unrecognized opcodes.
   NOTE(review): this raises PGM_SPECIFICATION; the architecture
   distinguishes operation vs. specification exceptions — confirm the
   intended code for illegal opcodes.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

/* If the CPU is in problem state, raise a privileged-operation
   exception (translation of the insn continues; the exception ends
   the TB). */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
391
/* Compute the effective address base(B2) + index(X2) + displacement D2
   into a fresh temporary, honoring 24/31-bit addressing when the TB is
   not in 64-bit mode.  Register number 0 means "no register". */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
429
/* Set the cc to the constant VAL (0..3) without emitting any code;
   the constant is tracked in s->cc_op. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}

/* Record a lazily-computed cc of kind OP taking one i64 input. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* As above, with an i32 input (zero-extended into cc_dst). */
static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Record a lazily-computed cc of kind OP taking two i64 inputs. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* As above, with two i32 inputs (zero-extended). */
static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Record a lazily-computed cc of kind OP taking three i64 inputs. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
477
/* cc = zero/non-zero of a 32-bit value. */
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}

/* cc = zero/non-zero of a 64-bit value. */
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc = sign/zero class of a 32-bit float result. */
static inline void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc = sign/zero class of a 64-bit float result. */
static inline void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc = sign/zero class of a 128-bit float result (high/low halves). */
static inline void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
502
/* Record a lazy 32-bit comparison v1 ? v2 with cc semantics COND. */
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}

/* Record a lazy 64-bit comparison v1 ? v2 with cc semantics COND. */
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}

/* Signed 32-bit compare. */
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}

/* Unsigned 32-bit compare. */
static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}

/* Signed 32-bit compare against an immediate. */
static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
{
    /* XXX optimize for the constant? put it in s? */
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
    tcg_temp_free_i32(tmp);
}

/* Unsigned 32-bit compare against an immediate. */
static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
{
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
    tcg_temp_free_i32(tmp);
}

/* Signed 64-bit compare. */
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}

/* Unsigned 64-bit compare. */
static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}

/* Signed 64-bit compare against an immediate. */
static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_s64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

/* Unsigned 64-bit compare against an immediate. */
static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_u64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}
563
/* cc = sign class (less/equal/greater than zero) of a 32-bit value. */
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}

/* cc = sign class of a 64-bit value. */
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}

/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}

/* Write the tracked cc-op kind to the cc_op global; no-op when the
   global already holds it (dynamic/static states). */
static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Flush the lazy cc-op state before a TB exit or helper call. */
static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
594
/* calculates cc into cc_op */
/* Materialize the lazily-tracked condition code into the cc_op global
   (via the calc_cc helper unless it is constant or already static),
   then mark the state as static. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
669
/* Decode an RR-format instruction: two 4-bit register fields. */
static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}

/* Decode an RX-format instruction and return the computed effective
   address base+index+displacement (caller frees the temporary). */
static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;

    return get_address(s, *x2, *b2, *d2);
}

/* Decode an RS-format instruction into its register, base and
   displacement fields. */
static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}

/* Decode an SI-format instruction (8-bit immediate, base+disp) and
   return the computed effective address (caller frees). */
static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}
714
/* True if a direct TB-to-TB link to DEST is permissible: the target
   must lie on one of the (up to two) pages this TB spans, and neither
   single-stepping nor I/O-last-insn handling may be active. */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}

/* Emit a jump to PC, chained directly to the next TB when allowed,
   otherwise via a plain TB exit.  Flushes the lazy cc state first. */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}
738
/* Debug statistics: count a branch that had to fall back to the
   calc_cc helper. Compiled out unless DEBUG_INLINE_BRANCHES is set. */
static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Debug statistics: count a branch translated to an inline compare. */
static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
752
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.
   Indexed by the 4-bit branch mask (bit 8=EQ, 4=LT, 2=GT, 1=unused). */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
779
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the tracked
   cc op allows, the branch is folded into an inline compare on the
   cc inputs; otherwise the cc is materialized and tested directly. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* All-ones / all-zeroes mask: branch always / never.  The operands
       are set to the cc_op global merely so free_compare is a no-op. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Test under mask: compare (value & mask) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The cc value is already in the cc_op global; decode MASK
           into a direct test on it, special-casing single-bit and
           contiguous masks.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
1002
1003 static void free_compare(DisasCompare *c)
1004 {
1005 if (!c->g1) {
1006 if (c->is_64) {
1007 tcg_temp_free_i64(c->u.s64.a);
1008 } else {
1009 tcg_temp_free_i32(c->u.s32.a);
1010 }
1011 }
1012 if (!c->g2) {
1013 if (c->is_64) {
1014 tcg_temp_free_i64(c->u.s64.b);
1015 } else {
1016 tcg_temp_free_i32(c->u.s32.b);
1017 }
1018 }
1019 }
1020
/* Translate the remaining 0xB2xx opcodes not handled by the new
   decoder.  All of these are privileged/system instructions, so in
   user-only builds everything falls through to an illegal-opcode
   exception.  Note the default label deliberately straddles the
   #ifndef so both configurations share the illegal path. */
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
                     uint32_t insn)
{
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2;
    int r1, r2;
    int r3, d2, b2;

    r1 = (insn >> 4) & 0xf;
    r2 = insn & 0xf;

    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);

    switch (op) {
    case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x34: /* STCH ? */
        /* Store Subchannel */
        check_privileged(s);
        /* No subchannels implemented: report cc 3 (not operational). */
        gen_op_movi_cc(s, 3);
        break;
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call */
        check_privileged(s);
        /* Not implemented: report cc 3. */
        gen_op_movi_cc(s, 3);
        break;
    case 0x78: /* STCKE   D2(B2) [S] */
        /* Store Clock Extended */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x79: /* SACF    D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->pc = s->next_pc;
        update_psw_addr(s);
        s->is_jmp = DISAS_JUMP;
        break;
    case 0x7d: /* STSI     D2,(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        /* STSI takes function code/selectors implicitly in r0/r1. */
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0xb1: /* STFL     D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0xb2: /* LPSWE    D2(B2) [S] */
        /* Load PSW Extended */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        /* Load the 16-byte PSW: mask then address. */
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        break;
    case 0x20: /* SERVC     R1,R2     [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        tmp = load_reg(r1);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    default:
#endif
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
#ifndef CONFIG_USER_ONLY
        break;
    }
#endif
}
1163
/* Legacy top-level dispatch: only the 0xb2 opcode family is still
   routed through the old hand-written translator; everything else
   raises an illegal-opcode exception here. */
static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
{
    unsigned char opc;
    uint64_t insn;
    int op;

    opc = cpu_ldub_code(env, s->pc);
    LOG_DISAS("opc 0x%x\n", opc);

    switch (opc) {
    case 0xb2:
        insn = ld_code4(env, s->pc);
        op = (insn >> 16) & 0xff;
        disas_b2(env, s, op, insn);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
        gen_illegal_opcode(s);
        break;
    }
}
1185
/* ====================================================================== */
/* Define the insn format enumeration.  The F0..F5 macros collapse each
   format definition in insn-format.def to a single FMT_<name> enum
   entry regardless of its field count. */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
1205
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll
   arrange for fields that do not exist at the same time to overlap,
   thus the "C" for compact.  For checking purposes there is an "O"
   for original index as well that will be applied to availability
   bitmaps.  */

/* Original (one-per-field) indices, used in availability bitmaps. */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact indices: fields that never co-occur share a storage slot. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1265
/* The decoded fields of one instruction.  presentO/presentC are bitmaps
   over the "original" and "compact" field indexes; the actual values
   live in c[], indexed by the compact index.  */
struct DisasFields {
    unsigned op:8;          /* major opcode */
    unsigned op2:8;         /* minor/extended opcode */
    unsigned presentC:16;   /* availability bitmap, compact indexes */
    unsigned int presentO;  /* availability bitmap, original indexes */
    int c[NUM_C_FIELD];     /* field values, stored at compact slots */
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* True iff the original-index field C was decoded for this insn.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch field value at compact slot C; asserts the field is present.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* bit offset within the instruction */
    unsigned int size:8;        /* field width in bits */
    unsigned int type:2;        /* extraction type; 1 is used by I()
                                   fields, 2 by 20-bit displacements,
                                   0 otherwise (see the macros below) */
    unsigned int indexC:6;      /* compact storage slot */
    enum DisasFieldIndexO indexO:8;  /* original index for presentO */
} DisasField;

/* All fields of one instruction format.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1302
/* Field-layout helper macros; B is the field's bit offset within the
   instruction.  These expand the X1..X5 arguments of insn-format.def
   into DisasField initializers for format_info.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* One entry per format, indexed by DisasFormat.  */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1343
/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    /* g_* flags: presumably mark operands that alias global TCG values
       (e.g. cpu registers) and so must not be clobbered in place; see
       the assert in op_andi.  TODO(review): confirm exact contract.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* Architectural facility required by an instruction.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;

/* One row of the insn table: opcode, format, required facility, and the
   set of pipeline callbacks that together translate the insn.  */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:6;
    DisasFacility fac:6;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* insn-specific constant, see s->insn->data */
};
1410
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */
1413
1414 static void help_l2_shift(DisasContext *s, DisasFields *f,
1415 DisasOps *o, int mask)
1416 {
1417 int b2 = get_field(f, b2);
1418 int d2 = get_field(f, d2);
1419
1420 if (b2 == 0) {
1421 o->in2 = tcg_const_i64(d2 & mask);
1422 } else {
1423 o->in2 = get_address(s, 0, b2, d2);
1424 tcg_gen_andi_i64(o->in2, o->in2, mask);
1425 }
1426 }
1427
/* Emit an unconditional branch to DEST.  A branch to the fallthrough
   address is a no-op; otherwise prefer a chained goto_tb when DEST is
   eligible, falling back to a plain PC update.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        gen_update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1444
/* Emit a conditional branch per compare C.  With IS_IMM set the target
   is PC + 2*IMM (halfword offset); otherwise it is the register value
   CDEST.  Consumes C (freed at egress).  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken: PC was already set above for the register
               case; set it here for the immediate case.  */
            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so a single 64-bit
               movcond can select the new PC.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1569
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

/* out = |in2| via helper.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Clear bit 31: absolute value of a short (32-bit) float.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Clear bit 63: absolute value of a long (64-bit) float.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* 128-bit float absolute value: sign bit lives in the high half (in1);
   the low half (in2) passes through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* out = in1 + in2.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Add with carry: out = in1 + in2 + (bit 1 of the computed CC).  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Extract the carry as bit 1 of the condition code.  */
    tcg_gen_shri_i64(cc, cc, 1);

    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}

/* Short BFP add via helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Long BFP add via helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Extended (128-bit) BFP add via helper; low half returned out of band.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* AND with an immediate applied to one byte/halfword of the register:
   insn->data packs the bit shift (low 8 bits) and field size.  Bits
   outside the field pass through from in1 unchanged; CC is computed
   from the masked field only.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* Branch and save: store link info in out; branch to in2 if present
   (an absent in2 means, e.g., basr with r2=0: link only, no branch).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* Branch relative and save: link, then direct branch to PC + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* Branch on condition: mask in m1, target either immediate i2 or in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch on count (32-bit): decrement the low half of r1, branch while
   the 32-bit result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch on count (64-bit): decrement r1 in place, branch while != 0.
   Note g1 is set: the compare operand aliases the global register.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1734
/* Short BFP compare; the helper returns the CC.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Long BFP compare; the helper returns the CC.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Extended BFP compare of (out:out2) with (in1:in2); helper returns CC.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Convert short BFP to 32-bit fixed, rounding mode in m3.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert long BFP to 32-bit fixed, rounding mode in m3.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert extended BFP to 32-bit fixed, rounding mode in m3.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert short BFP to 64-bit fixed, rounding mode in m3.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert long BFP to 64-bit fixed, rounding mode in m3.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert extended BFP to 64-bit fixed, rounding mode in m3.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert 64-bit fixed to short BFP; no CC change.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit fixed to long BFP; no CC change.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit fixed to extended BFP; low half via return_low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1834
/* Checksum: the helper consumes (in2, r2+1-length) and returns the bytes
   actually processed in LEN; advance the r2 address/length pair by that
   amount afterwards.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}

/* Compare logical characters: for power-of-two lengths up to 8 compare
   inline with a pair of loads; otherwise call the byte-wise helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}

/* Compare logical long extended; entirely helper-based.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare logical under mask; helper returns the CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Compare logical string; helper updates both operand addresses.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* Compare and swap (32-bit); helper returns the old value, CC static.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare and swap (64-bit).  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare double and swap: build the 64-bit new value from the r3/r3+1
   register pair and reuse the 64-bit compare-and-swap helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare double and swap (128-bit); entirely helper-based.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Convert to decimal: helper produces the packed value, store it at the
   address in in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1972
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypercall interface; the function code comes from
   the low 12 bits of the displacement.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
#endif
1988
/* 32-bit signed divide; helper returns quotient/remainder pair.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 32-bit unsigned divide.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 64-bit signed divide.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 128/64-bit unsigned divide: the dividend is the (out:out2) pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Short BFP divide via helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Long BFP divide via helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Extended BFP divide; low half via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Extract access register r2 into out.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* Extract the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}

/* EXECUTE: run the target insn with its second byte OR'd from in1,
   entirely in the helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}

/* FIND LEFTMOST ONE: R1 gets the leading-zero count (or 64 for zero
   input), R1+1 gets the input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2094
2095 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2096 {
2097 int m3 = get_field(s->fields, m3);
2098 int pos, len, base = s->insn->data;
2099 TCGv_i64 tmp = tcg_temp_new_i64();
2100 uint64_t ccm;
2101
2102 switch (m3) {
2103 case 0xf:
2104 /* Effectively a 32-bit load. */
2105 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2106 len = 32;
2107 goto one_insert;
2108
2109 case 0xc:
2110 case 0x6:
2111 case 0x3:
2112 /* Effectively a 16-bit load. */
2113 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2114 len = 16;
2115 goto one_insert;
2116
2117 case 0x8:
2118 case 0x4:
2119 case 0x2:
2120 case 0x1:
2121 /* Effectively an 8-bit load. */
2122 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2123 len = 8;
2124 goto one_insert;
2125
2126 one_insert:
2127 pos = base + ctz32(m3) * 8;
2128 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2129 ccm = ((1ull << len) - 1) << pos;
2130 break;
2131
2132 default:
2133 /* This is going to be a sequence of loads and inserts. */
2134 pos = base + 32 - 8;
2135 ccm = 0;
2136 while (m3) {
2137 if (m3 & 0x8) {
2138 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2139 tcg_gen_addi_i64(o->in2, o->in2, 1);
2140 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2141 ccm |= 0xff << pos;
2142 }
2143 m3 = (m3 << 1) & 0xf;
2144 pos -= 8;
2145 }
2146 break;
2147 }
2148
2149 tcg_gen_movi_i64(tmp, ccm);
2150 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2151 tcg_temp_free_i64(tmp);
2152 return NO_EXIT;
2153 }
2154
/* Insert immediate: deposit in2 into in1 at the shift/size packed into
   insn->data (low 8 bits = shift, rest = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}

/* INSERT PROGRAM MASK: build bits 32-39 of the output from the program
   mask (from psw_mask) and the current CC, clearing the old byte.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Program mask: psw_mask bits isolated via shift left 20 / right 36.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Condition code into bits 28-29 of the low word.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2181
#ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY; privileged, helper-based.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED; privileged, helper-based.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
2197
/* Lengthen short BFP to long.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Round long BFP to short.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Round extended BFP to long.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Round extended BFP to short.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Lengthen long BFP to extended; low half via return_low128.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Lengthen short BFP to extended; low half via return_low128.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Load 31-bit value: mask to the low 31 bits.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

/* Memory loads of various widths/signedness from the address in in2.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2283
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged, helper loads registers r1..r3.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit): privileged, helper loads registers r1..r3.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD REAL ADDRESS: privileged, helper returns CC.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PSW: privileged; loads two 32-bit words from in2 and installs
   them as the new PSW, so no code after this in the TB can run.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
2336
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from storage. */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2347
2348 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2349 {
2350 int r1 = get_field(s->fields, r1);
2351 int r3 = get_field(s->fields, r3);
2352 TCGv_i64 t = tcg_temp_new_i64();
2353 TCGv_i64 t4 = tcg_const_i64(4);
2354
2355 while (1) {
2356 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2357 store_reg32_i64(r1, t);
2358 if (r1 == r3) {
2359 break;
2360 }
2361 tcg_gen_add_i64(o->in2, o->in2, t4);
2362 r1 = (r1 + 1) & 15;
2363 }
2364
2365 tcg_temp_free_i64(t);
2366 tcg_temp_free_i64(t4);
2367 return NO_EXIT;
2368 }
2369
/* LOAD MULTIPLE HIGH: load the high halves of registers r1..r3
   (wrapping modulo 16) from successive words at the operand address. */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        /* Deposit the loaded word into bits 0-31 of the register. */
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2391
2392 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2393 {
2394 int r1 = get_field(s->fields, r1);
2395 int r3 = get_field(s->fields, r3);
2396 TCGv_i64 t8 = tcg_const_i64(8);
2397
2398 while (1) {
2399 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2400 if (r1 == r3) {
2401 break;
2402 }
2403 tcg_gen_add_i64(o->in2, o->in2, t8);
2404 r1 = (r1 + 1) & 15;
2405 }
2406
2407 tcg_temp_free_i64(t8);
2408 return NO_EXIT;
2409 }
2410
/* Generic move: transfer ownership of in2 (value and global-ness) to
   out, so the framework's output hook writes it to the destination.
   in2 is cleared to keep the cleanup code from freeing it twice. */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2419
/* 128-bit move: transfer ownership of the in1/in2 pair to out/out2,
   clearing the inputs so they are not freed twice. */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2431
/* MOVE (character): copy l1+1 bytes from the second operand to the
   first.  The length is passed to the helper as the raw l1 field. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
2440
/* MOVE LONG: operands are the r1 and r2 register pairs (address and
   length); the helper updates the pairs and yields the cc. */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
2452
/* MOVE LONG EXTENDED: like MVCL but with r1/r3 register pairs and a
   padding byte taken from the second-operand address (in2). */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2464
2465 #ifndef CONFIG_USER_ONLY
2466 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2467 {
2468 int r1 = get_field(s->fields, l1);
2469 check_privileged(s);
2470 potential_page_fault(s);
2471 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2472 set_cc_static(s);
2473 return NO_EXIT;
2474 }
2475
2476 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2477 {
2478 int r1 = get_field(s->fields, l1);
2479 check_privileged(s);
2480 potential_page_fault(s);
2481 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2482 set_cc_static(s);
2483 return NO_EXIT;
2484 }
2485 #endif
2486
2487 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2488 {
2489 potential_page_fault(s);
2490 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2491 set_cc_static(s);
2492 return NO_EXIT;
2493 }
2494
/* MOVE STRING: copy bytes up to the terminator in regs[0].  The helper
   returns the updated r1 address directly and the updated r2 address
   via the low half of the 128-bit return (retxl), picked up by
   return_low128 into in2.  The wout hooks write both back. */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2503
2504 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2505 {
2506 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2507 return NO_EXIT;
2508 }
2509
2510 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2511 {
2512 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
2513 return_low128(o->out2);
2514 return NO_EXIT;
2515 }
2516
2517 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2518 {
2519 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2520 return NO_EXIT;
2521 }
2522
2523 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2524 {
2525 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2526 return NO_EXIT;
2527 }
2528
2529 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2530 {
2531 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2532 return NO_EXIT;
2533 }
2534
2535 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2536 {
2537 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2538 return_low128(o->out2);
2539 return NO_EXIT;
2540 }
2541
2542 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2543 {
2544 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2545 return_low128(o->out2);
2546 return NO_EXIT;
2547 }
2548
2549 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2550 {
2551 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2552 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2553 tcg_temp_free_i64(r3);
2554 return NO_EXIT;
2555 }
2556
2557 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2558 {
2559 int r3 = get_field(s->fields, r3);
2560 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2561 return NO_EXIT;
2562 }
2563
2564 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2565 {
2566 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2567 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2568 tcg_temp_free_i64(r3);
2569 return NO_EXIT;
2570 }
2571
2572 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2573 {
2574 int r3 = get_field(s->fields, r3);
2575 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2576 return NO_EXIT;
2577 }
2578
2579 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2580 {
2581 gen_helper_nabs_i64(o->out, o->in2);
2582 return NO_EXIT;
2583 }
2584
2585 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2586 {
2587 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2588 return NO_EXIT;
2589 }
2590
2591 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2592 {
2593 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2594 return NO_EXIT;
2595 }
2596
2597 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2598 {
2599 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2600 tcg_gen_mov_i64(o->out2, o->in2);
2601 return NO_EXIT;
2602 }
2603
2604 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2605 {
2606 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2607 potential_page_fault(s);
2608 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2609 tcg_temp_free_i32(l);
2610 set_cc_static(s);
2611 return NO_EXIT;
2612 }
2613
2614 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2615 {
2616 tcg_gen_neg_i64(o->out, o->in2);
2617 return NO_EXIT;
2618 }
2619
2620 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2621 {
2622 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2623 return NO_EXIT;
2624 }
2625
2626 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2627 {
2628 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2629 return NO_EXIT;
2630 }
2631
2632 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2633 {
2634 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2635 tcg_gen_mov_i64(o->out2, o->in2);
2636 return NO_EXIT;
2637 }
2638
2639 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2640 {
2641 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2642 potential_page_fault(s);
2643 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2644 tcg_temp_free_i32(l);
2645 set_cc_static(s);
2646 return NO_EXIT;
2647 }
2648
2649 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2650 {
2651 tcg_gen_or_i64(o->out, o->in1, o->in2);
2652 return NO_EXIT;
2653 }
2654
/* OR IMMEDIATE (OILL/OILH/...): insn->data packs the insert size in
   bits 8+ and the bit offset of the target field in bits 0-7. */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is shifted in place, so it must not be a TCG global. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2670
2671 #ifndef CONFIG_USER_ONLY
2672 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2673 {
2674 check_privileged(s);
2675 gen_helper_ptlb(cpu_env);
2676 return NO_EXIT;
2677 }
2678 #endif
2679
2680 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2681 {
2682 tcg_gen_bswap16_i64(o->out, o->in2);
2683 return NO_EXIT;
2684 }
2685
2686 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2687 {
2688 tcg_gen_bswap32_i64(o->out, o->in2);
2689 return NO_EXIT;
2690 }
2691
2692 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2693 {
2694 tcg_gen_bswap64_i64(o->out, o->in2);
2695 return NO_EXIT;
2696 }
2697
2698 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2699 {
2700 TCGv_i32 t1 = tcg_temp_new_i32();
2701 TCGv_i32 t2 = tcg_temp_new_i32();
2702 TCGv_i32 to = tcg_temp_new_i32();
2703 tcg_gen_trunc_i64_i32(t1, o->in1);
2704 tcg_gen_trunc_i64_i32(t2, o->in2);
2705 tcg_gen_rotl_i32(to, t1, t2);
2706 tcg_gen_extu_i32_i64(o->out, to);
2707 tcg_temp_free_i32(t1);
2708 tcg_temp_free_i32(t2);
2709 tcg_temp_free_i32(to);
2710 return NO_EXIT;
2711 }
2712
2713 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2714 {
2715 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2716 return NO_EXIT;
2717 }
2718
2719 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2720 {
2721 int r1 = get_field(s->fields, r1);
2722 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
2723 return NO_EXIT;
2724 }
2725
2726 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2727 {
2728 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2729 return NO_EXIT;
2730 }
2731
2732 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2733 {
2734 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2735 return NO_EXIT;
2736 }
2737
2738 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2739 {
2740 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2741 return_low128(o->out2);
2742 return NO_EXIT;
2743 }
2744
2745 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2746 {
2747 gen_helper_sqeb(o->out, cpu_env, o->in2);
2748 return NO_EXIT;
2749 }
2750
2751 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2752 {
2753 gen_helper_sqdb(o->out, cpu_env, o->in2);
2754 return NO_EXIT;
2755 }
2756
2757 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2758 {
2759 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2760 return_low128(o->out2);
2761 return NO_EXIT;
2762 }
2763
2764 #ifndef CONFIG_USER_ONLY
2765 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2766 {
2767 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2768 check_privileged(s);
2769 potential_page_fault(s);
2770 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2771 tcg_temp_free_i32(r1);
2772 return NO_EXIT;
2773 }
2774 #endif
2775
/* SHIFT LEFT SINGLE (arithmetic).  insn->data holds the sign-bit
   position: 31 for the 32-bit form, 63 for the 64-bit form. */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
2789
2790 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
2791 {
2792 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2793 return NO_EXIT;
2794 }
2795
2796 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
2797 {
2798 tcg_gen_sar_i64(o->out, o->in1, o->in2);
2799 return NO_EXIT;
2800 }
2801
2802 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
2803 {
2804 tcg_gen_shr_i64(o->out, o->in1, o->in2);
2805 return NO_EXIT;
2806 }
2807
2808 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
2809 {
2810 gen_helper_sfpc(cpu_env, o->in2);
2811 return NO_EXIT;
2812 }
2813
2814 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: bits 56-59 of the operand address become
   the PSW access key.  Privileged. */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Drop the low 4 bits, then deposit the 4-bit key into psw_mask. */
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}
2822
/* SET STORAGE KEY EXTENDED: set the storage key for the page addressed
   by in2 from the key in in1.  Privileged. */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2829
/* SET SYSTEM MASK: replace the top byte (bits 0-7) of the PSW mask
   with the byte loaded into in2.  Privileged. */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
2836
/* STORE CPU ADDRESS.  Privileged. */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    /* NOTE(review): env->cpu_num is used as a stand-in for the CPU
       address here -- confirm against the SIGP/CPU-address model. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
2846
2847 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
2848 {
2849 gen_helper_stck(o->out, cpu_env);
2850 /* ??? We don't implement clock states. */
2851 gen_op_movi_cc(s, 0);
2852 return NO_EXIT;
2853 }
2854
2855 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
2856 {
2857 check_privileged(s);
2858 gen_helper_sckc(cpu_env, o->in2);
2859 return NO_EXIT;
2860 }
2861
2862 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
2863 {
2864 check_privileged(s);
2865 gen_helper_stckc(o->out, cpu_env);
2866 return NO_EXIT;
2867 }
2868
2869 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
2870 {
2871 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2872 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2873 check_privileged(s);
2874 potential_page_fault(s);
2875 gen_helper_stctg(cpu_env, r1, o->in2, r3);
2876 tcg_temp_free_i32(r1);
2877 tcg_temp_free_i32(r3);
2878 return NO_EXIT;
2879 }
2880
2881 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
2882 {
2883 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2884 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2885 check_privileged(s);
2886 potential_page_fault(s);
2887 gen_helper_stctl(cpu_env, r1, o->in2, r3);
2888 tcg_temp_free_i32(r1);
2889 tcg_temp_free_i32(r3);
2890 return NO_EXIT;
2891 }
2892
/* STORE CPU ID.  Privileged. */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* NOTE(review): stores env->cpu_num, the same value as STAP above.
       The architected CPU ID (version/identification/machine-type
       doubleword) is presumably richer than this -- verify. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
2899
2900 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
2901 {
2902 check_privileged(s);
2903 gen_helper_spt(cpu_env, o->in2);
2904 return NO_EXIT;
2905 }
2906
2907 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
2908 {
2909 check_privileged(s);
2910 gen_helper_stpt(o->out, cpu_env);
2911 return NO_EXIT;
2912 }
2913
2914 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
2915 {
2916 check_privileged(s);
2917 gen_helper_spx(cpu_env, o->in2);
2918 return NO_EXIT;
2919 }
2920
2921 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
2922 {
2923 check_privileged(s);
2924 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
2925 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
2926 return NO_EXIT;
2927 }
2928
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac, STOSM op 0xad):
   store the current system-mask byte, then AND or OR the immediate
   into the top byte of the PSW mask.  Privileged. */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    /* Opcode 0xac is STNSM (AND); otherwise STOSM (OR). */
    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
2952 #endif
2953
2954 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
2955 {
2956 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
2957 return NO_EXIT;
2958 }
2959
2960 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
2961 {
2962 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
2963 return NO_EXIT;
2964 }
2965
2966 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
2967 {
2968 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
2969 return NO_EXIT;
2970 }
2971
2972 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
2973 {
2974 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
2975 return NO_EXIT;
2976 }
2977
2978 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
2979 {
2980 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2981 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2982 potential_page_fault(s);
2983 gen_helper_stam(cpu_env, r1, o->in2, r3);
2984 tcg_temp_free_i32(r1);
2985 tcg_temp_free_i32(r3);
2986 return NO_EXIT;
2987 }
2988
2989 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
2990 {
2991 int m3 = get_field(s->fields, m3);
2992 int pos, base = s->insn->data;
2993 TCGv_i64 tmp = tcg_temp_new_i64();
2994
2995 pos = base + ctz32(m3) * 8;
2996 switch (m3) {
2997 case 0xf:
2998 /* Effectively a 32-bit store. */
2999 tcg_gen_shri_i64(tmp, o->in1, pos);
3000 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3001 break;
3002
3003 case 0xc:
3004 case 0x6:
3005 case 0x3:
3006 /* Effectively a 16-bit store. */
3007 tcg_gen_shri_i64(tmp, o->in1, pos);
3008 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3009 break;
3010
3011 case 0x8:
3012 case 0x4:
3013 case 0x2:
3014 case 0x1:
3015 /* Effectively an 8-bit store. */
3016 tcg_gen_shri_i64(tmp, o->in1, pos);
3017 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3018 break;
3019
3020 default:
3021 /* This is going to be a sequence of shifts and stores. */
3022 pos = base + 32 - 8;
3023 while (m3) {
3024 if (m3 & 0x8) {
3025 tcg_gen_shri_i64(tmp, o->in1, pos);
3026 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3027 tcg_gen_addi_i64(o->in2, o->in2, 1);
3028 }
3029 m3 = (m3 << 1) & 0xf;
3030 pos -= 8;
3031 }
3032 break;
3033 }
3034 tcg_temp_free_i64(tmp);
3035 return NO_EXIT;
3036 }
3037
3038 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3039 {
3040 int r1 = get_field(s->fields, r1);
3041 int r3 = get_field(s->fields, r3);
3042 int size = s->insn->data;
3043 TCGv_i64 tsize = tcg_const_i64(size);
3044
3045 while (1) {
3046 if (size == 8) {
3047 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3048 } else {
3049 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3050 }
3051 if (r1 == r3) {
3052 break;
3053 }
3054 tcg_gen_add_i64(o->in2, o->in2, tsize);
3055 r1 = (r1 + 1) & 15;
3056 }
3057
3058 tcg_temp_free_i64(tsize);
3059 return NO_EXIT;
3060 }
3061
3062 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3063 {
3064 int r1 = get_field(s->fields, r1);
3065 int r3 = get_field(s->fields, r3);
3066 TCGv_i64 t = tcg_temp_new_i64();
3067 TCGv_i64 t4 = tcg_const_i64(4);
3068 TCGv_i64 t32 = tcg_const_i64(32);
3069
3070 while (1) {
3071 tcg_gen_shl_i64(t, regs[r1], t32);
3072 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3073 if (r1 == r3) {
3074 break;
3075 }
3076 tcg_gen_add_i64(o->in2, o->in2, t4);
3077 r1 = (r1 + 1) & 15;
3078 }
3079
3080 tcg_temp_free_i64(t);
3081 tcg_temp_free_i64(t4);
3082 tcg_temp_free_i64(t32);
3083 return NO_EXIT;
3084 }
3085
/* SEARCH STRING: scan for the byte in regs[0] between the operand
   addresses.  The helper returns the updated r1 directly and the
   updated r2 via the low 128 half (picked up into in2); cc is set by
   the helper. */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3094
3095 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3096 {
3097 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3098 return NO_EXIT;
3099 }
3100
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, computed as
   in1 + ~in2 + carry where carry is derived from the current cc. */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    /* in2 is complemented in place, so it must not be a TCG global. */
    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* For subtraction, bit 1 of the cc encodes "no borrow", which is
       exactly the carry-in for the complemented addition. */
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3118
/* SUPERVISOR CALL: record the SVC interruption code and instruction
   length, then raise the SVC exception.  The PSW address and cc must
   be up to date before the exception is taken. */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    /* Low byte of the i1 field is the SVC interruption code. */
    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    /* Instruction length, needed to compute the return address. */
    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3137
3138 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3139 {
3140 gen_helper_tceb(cc_op, o->in1, o->in2);
3141 set_cc_static(s);
3142 return NO_EXIT;
3143 }
3144
3145 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3146 {
3147 gen_helper_tcdb(cc_op, o->in1, o->in2);
3148 set_cc_static(s);
3149 return NO_EXIT;
3150 }
3151
3152 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3153 {
3154 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3155 set_cc_static(s);
3156 return NO_EXIT;
3157 }
3158
3159 #ifndef CONFIG_USER_ONLY
3160 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3161 {
3162 potential_page_fault(s);
3163 gen_helper_tprot(cc_op, o->addr1, o->in2);
3164 set_cc_static(s);
3165 return NO_EXIT;
3166 }
3167 #endif
3168
3169 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3170 {
3171 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3172 potential_page_fault(s);
3173 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3174 tcg_temp_free_i32(l);
3175 set_cc_static(s);
3176 return NO_EXIT;
3177 }
3178
3179 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3180 {
3181 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3182 potential_page_fault(s);
3183 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3184 tcg_temp_free_i32(l);
3185 return NO_EXIT;
3186 }
3187
3188 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3189 {
3190 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3191 potential_page_fault(s);
3192 gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
3193 tcg_temp_free_i32(l);
3194 set_cc_static(s);
3195 return NO_EXIT;
3196 }
3197
3198 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3199 {
3200 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3201 return NO_EXIT;
3202 }
3203
/* EXCLUSIVE OR IMMEDIATE (XILF/XIHF/...): insn->data packs the insert
   size in bits 8+ and the bit offset of the target field in bits 0-7. */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is shifted in place, so it must not be a TCG global. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3219
3220 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3221 {
3222 o->out = tcg_const_i64(0);
3223 return NO_EXIT;
3224 }
3225
3226 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3227 {
3228 o->out = tcg_const_i64(0);
3229 o->out2 = o->out;
3230 o->g_out2 = true;
3231 return NO_EXIT;
3232 }
3233
3234 /* ====================================================================== */
3235 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3236 the original inputs), update the various cc data structures in order to
3237 be able to compute the new condition code. */
3238
3239 static void cout_abs32(DisasContext *s, DisasOps *o)
3240 {
3241 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3242 }
3243
3244 static void cout_abs64(DisasContext *s, DisasOps *o)
3245 {
3246 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3247 }
3248
3249 static void cout_adds32(DisasContext *s, DisasOps *o)
3250 {
3251 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3252 }
3253
3254 static void cout_adds64(DisasContext *s, DisasOps *o)
3255 {
3256 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3257 }
3258
3259 static void cout_addu32(DisasContext *s, DisasOps *o)
3260 {
3261 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3262 }
3263
3264 static void cout_addu64(DisasContext *s, DisasOps *o)
3265 {
3266 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3267 }
3268
3269 static void cout_addc32(DisasContext *s, DisasOps *o)
3270 {
3271 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3272 }
3273
3274 static void cout_addc64(DisasContext *s, DisasOps *o)
3275 {
3276 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3277 }
3278
3279 static void cout_cmps32(DisasContext *s, DisasOps *o)
3280 {
3281 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3282 }
3283
3284 static void cout_cmps64(DisasContext *s, DisasOps *o)
3285 {
3286 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3287 }
3288
3289 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3290 {
3291 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3292 }
3293
3294 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3295 {
3296 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3297 }
3298
3299 static void cout_f32(DisasContext *s, DisasOps *o)
3300 {
3301 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3302 }
3303
3304 static void cout_f64(DisasContext *s, DisasOps *o)
3305 {
3306 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3307 }
3308
3309 static void cout_f128(DisasContext *s, DisasOps *o)
3310 {
3311 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3312 }
3313
3314 static void cout_nabs32(DisasContext *s, DisasOps *o)
3315 {
3316 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3317 }
3318
3319 static void cout_nabs64(DisasContext *s, DisasOps *o)
3320 {
3321 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3322 }
3323
3324 static void cout_neg32(DisasContext *s, DisasOps *o)
3325 {
3326 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3327 }
3328
3329 static void cout_neg64(DisasContext *s, DisasOps *o)
3330 {
3331 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
3332 }
3333
3334 static void cout_nz32(DisasContext *s, DisasOps *o)
3335 {
3336 tcg_gen_ext32u_i64(cc_dst, o->out);
3337 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3338 }
3339
3340 static void cout_nz64(DisasContext *s, DisasOps *o)
3341 {
3342 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3343 }
3344
3345 static void cout_s32(DisasContext *s, DisasOps *o)
3346 {
3347 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3348 }
3349
3350 static void cout_s64(DisasContext *s, DisasOps *o)
3351 {
3352 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3353 }
3354
3355 static void cout_subs32(DisasContext *s, DisasOps *o)
3356 {
3357 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3358 }
3359
3360 static void cout_subs64(DisasContext *s, DisasOps *o)
3361 {
3362 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3363 }
3364
3365 static void cout_subu32(DisasContext *s, DisasOps *o)
3366 {
3367 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3368 }
3369
3370 static void cout_subu64(DisasContext *s, DisasOps *o)
3371 {
3372 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3373 }
3374
3375 static void cout_subb32(DisasContext *s, DisasOps *o)
3376 {
3377 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3378 }
3379
3380 static void cout_subb64(DisasContext *s, DisasOps *o)
3381 {
3382 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3383 }
3384
3385 static void cout_tm32(DisasContext *s, DisasOps *o)
3386 {
3387 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3388 }
3389
3390 static void cout_tm64(DisasContext *s, DisasOps *o)
3391 {
3392 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3393 }
3394
3395 /* ====================================================================== */
3396 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3397 with the TCG register to which we will write. Used in combination with
3398 the "wout" generators, in some cases we need a new temporary, and in
3399 some cases we can write to a TCG global. */
3400
3401 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3402 {
3403 o->out = tcg_temp_new_i64();
3404 }
3405
3406 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3407 {
3408 o->out = tcg_temp_new_i64();
3409 o->out2 = tcg_temp_new_i64();
3410 }
3411
3412 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3413 {
3414 o->out = regs[get_field(f, r1)];
3415 o->g_out = true;
3416 }
3417
3418 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3419 {
3420 /* ??? Specification exception: r1 must be even. */
3421 int r1 = get_field(f, r1);
3422 o->out = regs[r1];
3423 o->out2 = regs[(r1 + 1) & 15];
3424 o->g_out = o->g_out2 = true;
3425 }
3426
3427 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3428 {
3429 o->out = fregs[get_field(f, r1)];
3430 o->g_out = true;
3431 }
3432
3433 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3434 {
3435 /* ??? Specification exception: r1 must be < 14. */
3436 int r1 = get_field(f, r1);
3437 o->out = fregs[r1];
3438 o->out2 = fregs[(r1 + 2) & 15];
3439 o->g_out = o->g_out2 = true;
3440 }
3441
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store the full 64-bit result into r1.  */
    store_reg(get_field(f, r1), o->out);
}

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Insert only the low 8 bits of the result into r1, preserving the
       rest of the register.  */
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Insert only the low 16 bits of the result into r1.  */
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store the low 32 bits of the result into r1.  */
    store_reg32_i64(get_field(f, r1), o->out);
}

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store two 32-bit results into the register pair r1:r1+1.
       ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Split a 64-bit result across the pair: low half to r1+1, high
       half to r1.  Note this clobbers o->out in the process.
       ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store a 32-bit (short) FP result into f1.  */
    store_freg32_i64(get_field(f, r1), o->out);
}

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store a 64-bit (long) FP result into f1.  */
    store_freg(get_field(f, r1), o->out);
}
3497 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3498 {
3499 /* ??? Specification exception: r1 must be < 14. */
3500 int f1 = get_field(s->fields, r1);
3501 store_freg(f1, o->out);
3502 store_freg((f1 + 2) & 15, o->out2);
3503 }
3504
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store the 32-bit result only when r1 != r2 (used by insns that
       are no-ops when source and destination coincide).  */
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* As wout_cond_r1r2_32, but for short FP registers.  */
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}

/* Stores to memory at the address computed by in1_la1 (o->addr1),
   in the four operand widths.  */

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Store 32 bits to the address left in o->in2 by an address-form
       in2 generator.  */
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3544 /* ====================================================================== */
3545 /* The "INput 1" generators. These load the first operand to an insn. */
3546
3547 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3548 {
3549 o->in1 = load_reg(get_field(f, r1));
3550 }
3551
3552 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3553 {
3554 o->in1 = regs[get_field(f, r1)];
3555 o->g_in1 = true;
3556 }
3557
3558 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3559 {
3560 o->in1 = tcg_temp_new_i64();
3561 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
3562 }
3563
3564 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3565 {
3566 o->in1 = tcg_temp_new_i64();
3567 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
3568 }
3569
3570 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
3571 {
3572 o->in1 = tcg_temp_new_i64();
3573 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
3574 }
3575
3576 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3577 {
3578 /* ??? Specification exception: r1 must be even. */
3579 int r1 = get_field(f, r1);
3580 o->in1 = load_reg((r1 + 1) & 15);
3581 }
3582
3583 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3584 {
3585 /* ??? Specification exception: r1 must be even. */
3586 int r1 = get_field(f, r1);
3587 o->in1 = tcg_temp_new_i64();
3588 tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
3589 }
3590
3591 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3592 {
3593 /* ??? Specification exception: r1 must be even. */
3594 int r1 = get_field(f, r1);
3595 o->in1 = tcg_temp_new_i64();
3596 tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
3597 }
3598
3599 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3600 {
3601 /* ??? Specification exception: r1 must be even. */
3602 int r1 = get_field(f, r1);
3603 o->in1 = tcg_temp_new_i64();
3604 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3605 }
3606
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Copy r2 into a fresh temp.  */
    o->in1 = load_reg(get_field(f, r2));
}

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Copy r3 into a fresh temp.  */
    o->in1 = load_reg(get_field(f, r3));
}

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Use the r3 global in place; not freed by translate_one.  */
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Sign-extended low 32 bits of r3.  */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Zero-extended low 32 bits of r3.  */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Short (32-bit) FP operand from f1.  */
    o->in1 = load_freg32_i64(get_field(f, r1));
}

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Long (64-bit) FP operand: use the f1 global in place.  */
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Extended (128-bit) FP operand x1.  Note this deliberately fills
       o->out/o->out2 rather than o->in1 (mirroring prep_x1); the x2
       half of such ops goes through in2_x2_o into in1/in2, so a
       128-bit op sees its four input halves as out,out2,in1,in2.
       NOTE(review): presumed intentional -- confirm against the ops
       that use this generator.
       ??? Specification exception: r1 must be < 14.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3654
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Effective address from base b1 + displacement d1 (no index).  */
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Effective address from the second-operand fields x2/b2/d2,
       placed in addr1; x2 is optional in some formats.  */
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* Memory loads through the first-operand address, in the various
   widths/extensions.  Each also leaves the address in o->addr1 for a
   possible write-back by a wout_m1_* generator.  */

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3707
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Use the r1 global in place; not freed by translate_one.  */
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Zero-extended low 16 bits of r1.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Zero-extended low 32 bits of r1.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Copy r2 into a fresh temp.  */
    o->in2 = load_reg(get_field(f, r2));
}

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Use the r2 global in place.  */
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Load r2 only when the field is non-zero; r2 == 0 leaves o->in2
       unset (for insns where register 0 means "no operand").  */
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Sign-extended low 8 bits of r2.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Zero-extended low 8 bits of r2.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Sign-extended low 16 bits of r2.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Zero-extended low 16 bits of r2.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Copy r3 into a fresh temp.  */
    o->in2 = load_reg(get_field(f, r3));
}

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Sign-extended low 32 bits of r2.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Zero-extended low 32 bits of r2.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Short (32-bit) FP operand from f2.  */
    o->in2 = load_freg32_i64(get_field(f, r2));
}

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Long (64-bit) FP operand: use the f2 global in place.  */
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Extended (128-bit) FP operand x2, halves in in1/in2 (the x1
       half of such ops occupies out/out2; see in1_x1_o).
       ??? Specification exception: r1 must be < 14.  */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[(r2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Register-as-address: treat r2's value as an effective address
       (base = r2, no index, no displacement).  */
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Effective address from x2/b2/d2; x2 is optional in some formats.  */
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* PC-relative address: i2 is a signed halfword offset from the
       current insn address.  */
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Shift count for 32-bit shifts (masked to 0..31).  */
    help_l2_shift(s, f, o, 31);
}

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Shift count for 64-bit shifts (masked to 0..63).  */
    help_l2_shift(s, f, o, 63);
}
3834
/* Memory loads through the second-operand address.  The address temp
   produced by in2_a2/in2_ri2 is reused in place as the value temp.  */

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* As above, but PC-relative ("mri" = memory at relative immediate).  */

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
3894
/* Immediate operands, in the various extensions.  i2 is stored
   sign-extended by extract_field; the casts below re-narrow it.  */

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Sign-extended immediate, as extracted.  */
    o->in2 = tcg_const_i64(get_field(f, i2));
}

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Immediate zero-extended from 8 bits.  */
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Immediate zero-extended from 16 bits.  */
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Immediate zero-extended from 32 bits.  */
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 16-bit immediate shifted left by the per-insn data amount
       (e.g. for the IILH/IIHH family of insert-immediate insns).  */
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 32-bit immediate shifted left by the per-insn data amount.  */
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
3926
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

/* C() is D() with a zero data field.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: an enum indexing insn_info[].  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
/* Second expansion: the DisasInsn descriptor for each insn, wiring the
   in1/in2/prep/wout/cout/op helper names to the generator functions
   defined above.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef D
/* Third expansion: switch cases mapping a 16-bit opcode to its
   descriptor.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
3985
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* Size 0 marks an unused field slot in the format description.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        /* Classic sign-extension: xor with the sign bit, then subtract it.  */
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The raw value has dh (high 8, signed) below dl (low 12);
           swap them into place, sign-extending through the int8_t cast.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4025
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes are enough to determine the insn length.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Re-read the full insn and left-align it in the uint64_t, as
       extract_field expects.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4114
/* Translate a single insn at s->pc: decode it, run the operand
   generators and the operation helper, write back results, then free
   any non-global temps and advance the PC.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter.  This includes ILLOPC.  */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy DISAS_* status to the new ExitStatus.  */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; the order
       in1 -> in2 -> prep -> op -> wout -> cout is the generator
       contract relied upon by all the helpers above.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers; the g_* flags mark
       TCG globals that must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
4199
/* Translate a basic block of guest code into TCG ops.  With SEARCH_PC
   set, also record per-insn metadata (pc, cc_op, icount) so a host PC
   can later be mapped back to a guest state (see restore_state_to_opc).  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* TBs never cross a guest page boundary.  */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record the guest state for the ops about to be emitted,
               zero-filling any op slots skipped since last insn.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* Stop before translating an insn with a breakpoint on it.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The epilogue was already emitted by translate_one.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the metadata slots for the trailing ops.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4338
/* Public entry point: translate a TB without PC-search metadata.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
4343
/* Public entry point: translate a TB recording PC-search metadata.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4348
/* Restore guest CPU state from the metadata recorded at op index
   PC_POS by the search_pc pass of gen_intermediate_code_internal.  */
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
    cc_op = gen_opc_cc_op[pc_pos];
    /* DYNAMIC/STATIC are sentinel cc states, not real values to restore.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}