]> git.proxmox.com Git - mirror_qemu.git/blob - target-s390x/translate.c
target-s390: Implement SET ROUNDING MODE
[mirror_qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
/* Per-TB translation state, threaded through all translator helpers.  */
struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;      /* decode-table entry for the current insn */
    DisasFields *fields;        /* decoded operand fields of the current insn */
    uint64_t pc, next_pc;       /* address of current insn and of the next */
    enum cc_op cc_op;           /* symbolic condition-code state */
    bool singlestep_enabled;
};
59
/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;                 /* true: u.s64 is valid, else u.s32 */
    bool g1;                    /* operand a is a global; do not free */
    bool g2;                    /* operand b is a global; do not free */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
71
72 #define DISAS_EXCP 4
73
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
77 #endif
78
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
80 {
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
84 }
85 }
86 return pc;
87 }
88
89 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
90 int flags)
91 {
92 int i;
93
94 if (env->cc_op > 3) {
95 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
96 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
97 } else {
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
99 env->psw.mask, env->psw.addr, env->cc_op);
100 }
101
102 for (i = 0; i < 16; i++) {
103 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
104 if ((i % 4) == 3) {
105 cpu_fprintf(f, "\n");
106 } else {
107 cpu_fprintf(f, " ");
108 }
109 }
110
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
113 if ((i % 4) == 3) {
114 cpu_fprintf(f, "\n");
115 } else {
116 cpu_fprintf(f, " ");
117 }
118 }
119
120 #ifndef CONFIG_USER_ONLY
121 for (i = 0; i < 16; i++) {
122 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
123 if ((i % 4) == 3) {
124 cpu_fprintf(f, "\n");
125 } else {
126 cpu_fprintf(f, " ");
127 }
128 }
129 #endif
130
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i = 0; i < CC_OP_MAX; i++) {
133 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
134 inline_branch_miss[i], inline_branch_hit[i]);
135 }
136 #endif
137
138 cpu_fprintf(f, "\n");
139 }
140
141 static TCGv_i64 psw_addr;
142 static TCGv_i64 psw_mask;
143
144 static TCGv_i32 cc_op;
145 static TCGv_i64 cc_src;
146 static TCGv_i64 cc_dst;
147 static TCGv_i64 cc_vr;
148
149 static char cpu_reg_names[32][4];
150 static TCGv_i64 regs[16];
151 static TCGv_i64 fregs[16];
152
153 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
154
/* One-time translator initialization: allocate the fixed TCG globals
   (env pointer, PSW halves, condition-code state, and the sixteen
   general and floating point registers) and register the helpers.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* Names live in cpu_reg_names: slots 0-15 for GPRs, 16-31 for FPRs.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
194
195 static TCGv_i64 load_reg(int reg)
196 {
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
199 return r;
200 }
201
202 static TCGv_i64 load_freg32_i64(int reg)
203 {
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
206 return r;
207 }
208
/* Store V into the full 64-bit general register REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
213
/* Store V into the full 64-bit floating point register REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
218
/* Store the low 32 bits of V into register REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
224
/* Store the low 32 bits of V into the HIGH half of register REG,
   leaving the low half untouched.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
229
/* Store the low 32 bits of V into the high half of FP register REG,
   where 32-bit float values live; the low half is preserved.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
234
/* Fetch into DEST the low doubleword of a 128-bit helper result,
   which helpers leave in env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
239
/* Synchronize the architectural PSW address with the translator's PC.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
245
/* Write the symbolic cc_op back to the env global.  DYNAMIC and STATIC
   mean env->cc_op is already authoritative, so nothing is emitted.  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
252
/* Prepare for an insn that may fault: make the PSW address and cc state
   in env consistent so the exception handler sees correct values.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
258
/* Fetch a 2-byte instruction halfword at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
263
/* Fetch a 4-byte instruction word at PC, zero-extended to 64 bits.
   The uint32_t cast prevents sign extension of the loaded value.  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
268
/* Fetch a 6-byte instruction at PC.  ld_code2 already returns uint64_t,
   so the 32-bit shift is performed in 64-bit arithmetic.  */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
273
/* Map the PSW address-space-control bits recorded in the TB flags to
   the MMU index used for this translation block's memory accesses.  */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Any other ASC value is a translator bug.  */
        tcg_abort();
        break;
    }
}
288
/* Emit a call to the exception helper with exception number EXCP;
   the helper does not return to generated code.  */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
295
/* Raise a program exception with the given CODE, recording the code and
   instruction length in env and leaving the PSW past the current insn.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Remember the instruction length, needed by the interrupt logic.  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}
319
/* Raise the program exception used for unimplemented/illegal opcodes.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}
324
/* Raise a privileged-operation exception if the TB was translated in
   problem (user) state, as recorded in the TB flags.  */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
331
/* Compute the effective address base(B2) + index(X2) + displacement(D2).
   Register number 0 means "no register" for both B2 and X2, per the
   architecture.  Returns a new temporary the caller must free.  */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        /* Index register present: start from d2 + x2, then add base.  */
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        /* Base register only.  */
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        /* Displacement only.  */
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
369
/* Record that the condition code is the constant VAL (0..3); no TCG
   code is emitted, the constant is folded into the symbolic cc_op.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}
374
/* Record a one-operand cc computation: OP applied to DST alone.
   Unused cc inputs are discarded so TCG can optimize them away.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}
382
/* Record a two-operand cc computation: OP applied to SRC and DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}
391
/* Record a three-operand cc computation: OP applied to SRC, DST, VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
400
/* Set the cc from a 64-bit value: 0 if zero, nonzero otherwise.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
405
/* Set the cc from a 32-bit float result via the CC_OP_NZ_F32 rules.  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
410
/* Set the cc from a 64-bit float result via the CC_OP_NZ_F64 rules.  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
415
/* Set the cc from a 128-bit float result (high/low doublewords).  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
420
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    /* The concrete cc lives in env; the TCG inputs are dead.  */
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}
429
/* Materialize the symbolic condition code into the cc_op global
   (env->cc_op), leaving the translator state as CC_OP_STATIC.  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First switch: allocate the temporaries each op class needs.
       The default case creates the dummy argument and FALLS THROUGH
       into the 3-argument ops, which also need local_cc_op.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed; no temporaries.  */
        break;
    }

    /* Second switch: emit the cc computation itself.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
537
538 static int use_goto_tb(DisasContext *s, uint64_t dest)
539 {
540 /* NOTE: we handle the case where the TB spans two pages here */
541 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
542 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
543 && !s->singlestep_enabled
544 && !(s->tb->cflags & CF_LAST_IO));
545 }
546
/* Debug statistics: count a branch that needed the cc helper.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
553
/* Debug statistics: count a branch compiled to an inline comparison.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
560
561 /* Table of mask values to comparison codes, given a comparison as input.
562 For a true comparison CC=3 will never be set, but we treat this
563 conservatively for possible use when CC=3 indicates overflow. */
564 static const TCGCond ltgt_cond[16] = {
565 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
566 TCG_COND_GT, TCG_COND_NEVER, /* | | GT | x */
567 TCG_COND_LT, TCG_COND_NEVER, /* | LT | | x */
568 TCG_COND_NE, TCG_COND_NEVER, /* | LT | GT | x */
569 TCG_COND_EQ, TCG_COND_NEVER, /* EQ | | | x */
570 TCG_COND_GE, TCG_COND_NEVER, /* EQ | | GT | x */
571 TCG_COND_LE, TCG_COND_NEVER, /* EQ | LT | | x */
572 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
573 };
574
575 /* Table of mask values to comparison codes, given a logic op as input.
576 For such, only CC=0 and CC=1 should be possible. */
577 static const TCGCond nz_cond[16] = {
578 /* | | x | x */
579 TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
580 /* | NE | x | x */
581 TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
582 /* EQ | | x | x */
583 TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
584 /* EQ | NE | x | x */
585 TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
586 };
587
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  MASK is the 4-bit
   branch mask (bit 8 = cc 0, 4 = cc 1, 2 = cc 2, 1 = cc 3).  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Always/never taken: emit a trivially true/false comparison of
       the cc_op global against itself.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The cc value is already in the cc_op global; pick the
           cheapest comparison that implements MASK against it.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
810
811 static void free_compare(DisasCompare *c)
812 {
813 if (!c->g1) {
814 if (c->is_64) {
815 tcg_temp_free_i64(c->u.s64.a);
816 } else {
817 tcg_temp_free_i32(c->u.s32.a);
818 }
819 }
820 if (!c->g2) {
821 if (c->is_64) {
822 tcg_temp_free_i64(c->u.s64.b);
823 } else {
824 tcg_temp_free_i32(c->u.s32.b);
825 }
826 }
827 }
828
829 /* ====================================================================== */
830 /* Define the insn format enumeration. */
831 #define F0(N) FMT_##N,
832 #define F1(N, X1) F0(N)
833 #define F2(N, X1, X2) F0(N)
834 #define F3(N, X1, X2, X3) F0(N)
835 #define F4(N, X1, X2, X3, X4) F0(N)
836 #define F5(N, X1, X2, X3, X4, X5) F0(N)
837
838 typedef enum {
839 #include "insn-format.def"
840 } DisasFormat;
841
842 #undef F0
843 #undef F1
844 #undef F2
845 #undef F3
846 #undef F4
847 #undef F5
848
849 /* Define a structure to hold the decoded fields. We'll store each inside
850 an array indexed by an enum. In order to conserve memory, we'll arrange
851 for fields that do not exist at the same time to overlap, thus the "C"
852 for compact. For checking purposes there is an "O" for original index
853 as well that will be applied to availability bitmaps. */
854
855 enum DisasFieldIndexO {
856 FLD_O_r1,
857 FLD_O_r2,
858 FLD_O_r3,
859 FLD_O_m1,
860 FLD_O_m3,
861 FLD_O_m4,
862 FLD_O_b1,
863 FLD_O_b2,
864 FLD_O_b4,
865 FLD_O_d1,
866 FLD_O_d2,
867 FLD_O_d4,
868 FLD_O_x2,
869 FLD_O_l1,
870 FLD_O_l2,
871 FLD_O_i1,
872 FLD_O_i2,
873 FLD_O_i3,
874 FLD_O_i4,
875 FLD_O_i5
876 };
877
878 enum DisasFieldIndexC {
879 FLD_C_r1 = 0,
880 FLD_C_m1 = 0,
881 FLD_C_b1 = 0,
882 FLD_C_i1 = 0,
883
884 FLD_C_r2 = 1,
885 FLD_C_b2 = 1,
886 FLD_C_i2 = 1,
887
888 FLD_C_r3 = 2,
889 FLD_C_m3 = 2,
890 FLD_C_i3 = 2,
891
892 FLD_C_m4 = 3,
893 FLD_C_b4 = 3,
894 FLD_C_i4 = 3,
895 FLD_C_l1 = 3,
896
897 FLD_C_i5 = 4,
898 FLD_C_d1 = 4,
899
900 FLD_C_d2 = 5,
901
902 FLD_C_d4 = 6,
903 FLD_C_x2 = 6,
904 FLD_C_l2 = 6,
905
906 NUM_C_FIELD = 7
907 };
908
/* A decoded instruction: opcode bytes plus the compacted operand
   fields.  presentC/presentO are bitmaps over DisasFieldIndexC and
   DisasFieldIndexO respectively.  */
struct DisasFields {
    unsigned op:8;              /* primary opcode byte */
    unsigned op2:8;             /* secondary opcode byte, if any */
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];         /* field values, indexed by compact index */
};
916
917 /* This is the way fields are to be accessed out of DisasFields. */
918 #define have_field(S, F) have_field1((S), FLD_O_##F)
919 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
920
/* Test whether original-index field C is present in F.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}
925
/* Fetch the value of field O (stored at compact index C) from F,
   asserting that the current format actually has that field.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
932
933 /* Describe the layout of each field in each format. */
934 typedef struct DisasField {
935 unsigned int beg:8;
936 unsigned int size:8;
937 unsigned int type:2;
938 unsigned int indexC:6;
939 enum DisasFieldIndexO indexO:8;
940 } DisasField;
941
942 typedef struct DisasFormatInfo {
943 DisasField op[NUM_C_FIELD];
944 } DisasFormatInfo;
945
946 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
947 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
948 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
949 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
950 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
951 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
952 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
953 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
954 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
955 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
956 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
957 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
958 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
959 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
960
961 #define F0(N) { { } },
962 #define F1(N, X1) { { X1 } },
963 #define F2(N, X1, X2) { { X1, X2 } },
964 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
965 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
966 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
967
968 static const DisasFormatInfo format_info[] = {
969 #include "insn-format.def"
970 };
971
972 #undef F0
973 #undef F1
974 #undef F2
975 #undef F3
976 #undef F4
977 #undef F5
978 #undef R
979 #undef M
980 #undef BD
981 #undef BXD
982 #undef BDL
983 #undef BXDL
984 #undef I
985 #undef L
986
/* Generally, we'll extract operands into these structures, operate upon
988 them, and store them back. See the "in1", "in2", "prep", "wout" sets
989 of routines below for more details. */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;   /* operand is a global; do not free */
    TCGv_i64 out, out2, in1, in2;       /* output and input operand values */
    TCGv_i64 addr1;                     /* computed memory address, if any */
} DisasOps;
995
996 /* Instructions can place constraints on their operands, raising specification
997 exceptions if they are violated. To make this easy to automate, each "in1",
998 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
999 of the following, or 0. To make this easy to document, we'll put the
1000 SPEC_<name> defines next to <name>. */
1001
1002 #define SPEC_r1_even 1
1003 #define SPEC_r2_even 2
1004 #define SPEC_r1_f128 4
1005 #define SPEC_r2_f128 8
1006
1007 /* Return values from translate_one, indicating the state of the TB. */
1008 typedef enum {
1009 /* Continue the TB. */
1010 NO_EXIT,
1011 /* We have emitted one or more goto_tb. No fixup required. */
1012 EXIT_GOTO_TB,
1013 /* We are not using a goto_tb (for whatever reason), but have updated
1014 the PC (for whatever reason), so there's no need to do it again on
1015 exiting the TB. */
1016 EXIT_PC_UPDATED,
1017 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1018 updated the PC for the next instruction to be executed. */
1019 EXIT_PC_STALE,
1020 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1021 No following code will be executed. */
1022 EXIT_NORETURN,
1023 } ExitStatus;
1024
/* Architecture facility an instruction belongs to; used to gate
   availability of each insn in the decode table.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1047
1048 struct DisasInsn {
1049 unsigned opc:16;
1050 DisasFormat fmt:6;
1051 DisasFacility fac:6;
1052 unsigned spec:4;
1053
1054 const char *name;
1055
1056 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1057 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1058 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1059 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1060 void (*help_cout)(DisasContext *, DisasOps *);
1061 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1062
1063 uint64_t data;
1064 };
1065
1066 /* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */
1068
/* Load the shift-count operand for shift insns into o->in2: the low
   bits (selected by MASK) of the effective address base(b2) + d2.  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        /* No base register: the count is immediate.  */
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
1082
/* Emit an unconditional branch to DEST, chaining with goto_tb when
   permitted.  A branch to the fallthrough address is a no-op.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1099
/* Emit a conditional branch under comparison C.  If IS_IMM, the target
   is a halfword offset IMM from the current insn; otherwise it is the
   run-time value CDEST.  Frees C before returning.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so a 64-bit movcond
               can select between the two destinations.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1224
1225 /* ====================================================================== */
1226 /* The operations. These perform the bulk of the work for any insn,
1227 usually after the operands have been loaded and output initialized. */
1228
/* Absolute value: out = |in2|, computed by the abs_i64 helper.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}
1234
/* Float32 absolute value: clear the sign bit (bit 31).  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
1240
/* Float64 absolute value: clear the sign bit (bit 63).  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
1246
/* Float128 absolute value: clear the sign bit in the high doubleword
   (in1) and pass the low doubleword (in2) through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
1253
/* 64-bit addition: out = in1 + in2.  CC is handled by the insn table.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1259
/* Add with carry: out = in1 + in2 + carry.  The incoming carry is
   bit 1 of the condition code, so force cc materialization and
   extract that bit.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Carry is encoded in cc bit 1; shift it down to bit 0.  */
    tcg_gen_shri_i64(cc, cc, 1);

    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
1276
/* ADD (short BFP): out = in1 + in2 via helper; cpu_env carries FP state.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (long BFP).  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (extended BFP): 128-bit operands as hi/lo doubleword pairs; the
   helper returns the high half, the low half via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1295
/* Bitwise AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* AND immediate into a sub-field of a register.  The insn table data
   encodes the field position (low byte) and width in bits (upper bits).
   All bits outside the field are forced to 1 in the mask operand so
   they pass through from in1 unchanged.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* We modify in2 in place; it must not be a live global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1318
/* BRANCH AND SAVE: store the link information in out (r1) and branch
   to in2.  When in2 is unused (no branch target supplied) only the
   link register is updated.  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
1329
/* Branch relative and save: link info into out, then a direct branch
   to pc + 2 * i2 (i2 counts halfwords).  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1335
/* BRANCH ON CONDITION: translate the m1 mask into a comparison and
   branch either to the immediate displacement (i2 present) or to the
   address in in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1346
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and
   branch while the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits, but only the low half is stored back and
       compared; the high half of r1 is untouched.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1370
/* BRANCH ON COUNT (64-bit): decrement r1 and branch while non-zero.
   The comparand is the global register itself (g1 = true), so
   help_branch must not free it.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1389
/* BRANCH ON INDEX (32-bit): r1 += r3, then branch comparing the new
   low 32 bits of r1 against the low 32 bits of r3|1.  The insn table
   data selects the branch sense (LE vs GT).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Both comparands are captured before the store so that r1
       aliasing r3|1 cannot disturb the comparison.  */
    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1415
/* BRANCH ON INDEX (64-bit): r1 += r3, branch comparing r1 with r3|1.
   The insn table data selects the branch sense (LE vs GT).  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 aliases the comparand register r3|1, the addition below
       would clobber it, so snapshot its value first.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1441
/* COMPARE AND BRANCH: compare in1 against in2 with a condition taken
   from the m3 mask, then branch to the i4 immediate or to the b4/d4
   address.  The insn table data selects an unsigned comparison.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    /* Bit 3 of the m3 field is reserved and should be zero.
       Choose to ignore it wrt the ltgt_cond table above. */
    c.cond = ltgt_cond[m3 & 14];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Register form: compute the branch target address into out.  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1469
/* COMPARE (short BFP): the helper computes the condition code.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (long BFP).  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (extended BFP): operands are hi/lo doubleword pairs.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1490
/* CONVERT TO FIXED/LOGICAL: BFP -> integer conversions.  In each case
   the m3 field (rounding mode) is passed through to the helper, and
   the CC is derived from the float input (NaN/out-of-range produce
   CC 3 in the helper's cc computation).  */

/* float32 -> int32.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* float64 -> int32.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* float128 -> int32.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* float32 -> int64.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* float64 -> int64.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* float128 -> int64.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* float32 -> uint32.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* float64 -> uint32.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* float128 -> uint32.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* float32 -> uint64.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* float64 -> uint64.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* float128 -> uint64.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1598
/* CONVERT FROM FIXED/LOGICAL: integer -> BFP conversions; m3 is the
   rounding-mode field passed through to the helper.  No CC change.  */

/* int64 -> float32.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* int64 -> float64.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* int64 -> float128; low doubleword via return_low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

/* uint64 -> float32.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* uint64 -> float64.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* uint64 -> float128; low doubleword via return_low128.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1648
/* CHECKSUM: the helper accumulates the checksum and returns the number
   of bytes it consumed; the r2/r2+1 address/length pair is then
   advanced by that amount.  The checksum itself comes back through
   return_low128 into out.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the second-operand address and decrement its length.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1665
/* COMPARE LOGICAL (storage-to-storage).  For the power-of-two lengths
   the comparison is inlined as two loads plus an unsigned compare;
   all other lengths go through the byte-wise helper.  Note l1 is the
   encoded length, i.e. the operand length minus one.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: helper does the comparison and sets the CC.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1699
/* COMPARE LOGICAL LONG EXTENDED: fully handled by the helper, which
   also sets the condition code.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
1711
/* COMPARE LOGICAL CHARACTERS UNDER MASK: compare the bytes of in1
   selected by m3 against successive bytes at the address in in2.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1724
/* COMPARE LOGICAL STRING: the helper compares the strings terminated
   by the byte in r0; updated addresses come back in in1 and (via
   return_low128) in2.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1733
1734 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1735 {
1736 TCGv_i64 t = tcg_temp_new_i64();
1737 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1738 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1739 tcg_gen_or_i64(o->out, o->out, t);
1740 tcg_temp_free_i64(t);
1741 return NO_EXIT;
1742 }
1743
/* COMPARE AND SWAP (32-bit): the helper performs the atomic
   compare-and-swap and sets the condition code.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE AND SWAP (64-bit).  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
1761
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): handled by the helper,
   which also sets the condition code.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
1773
/* COMPARE DOUBLE AND SWAP (32-bit pair): build the 64-bit comparand
   from the even/odd register pair r3/r3+1 and reuse the 64-bit
   compare-and-swap helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}
1785
/* COMPARE DOUBLE AND SWAP (64-bit pair): 128-bit CAS via helper.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
1796
/* CONVERT TO DECIMAL: the helper converts the low 32 bits of in1 to
   packed decimal; the 8-byte result is stored at the address in in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1808
/* COMPARE AND TRAP: branch past the trap when the (inverted) condition
   holds; otherwise record the data-exception code in the FPC and raise
   a data program exception.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    /* Bit 3 of the m3 field is reserved and should be zero.
       Choose to ignore it wrt the ltgt_cond table above. */
    c = tcg_invert_cond(ltgt_cond[m3 & 14]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap. */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1837
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): dispatch on the function code taken from the
   displacement field; r2 receives the helper's result.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
#endif
1853
/* DIVIDE (32-bit signed): helper returns the remainder; the quotient
   comes back through return_low128.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (32-bit unsigned).  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE SINGLE (64-bit signed).  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (64-bit unsigned, 128-bit dividend in out/out2).  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE (short BFP).  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (long BFP).  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (extended BFP): hi/lo pairs; low half via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1900
/* EXTRACT ACCESS REGISTER: out = access register r2 (zero-extended).  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT FPC: out = floating point control register (zero-extended).  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
1913
/* EXECUTE: run the instruction at the target address with its second
   byte modified by in1; implemented entirely in the helper, with the
   resulting CC made static afterwards.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    /* The helper needs an up-to-date PSW address and cc state.  */
    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
1939
/* FIND LEFTMOST ONE: R1 = count of leading zeros (64 if input is 0),
   R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
1959
1960 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
1961 {
1962 int m3 = get_field(s->fields, m3);
1963 int pos, len, base = s->insn->data;
1964 TCGv_i64 tmp = tcg_temp_new_i64();
1965 uint64_t ccm;
1966
1967 switch (m3) {
1968 case 0xf:
1969 /* Effectively a 32-bit load. */
1970 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
1971 len = 32;
1972 goto one_insert;
1973
1974 case 0xc:
1975 case 0x6:
1976 case 0x3:
1977 /* Effectively a 16-bit load. */
1978 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
1979 len = 16;
1980 goto one_insert;
1981
1982 case 0x8:
1983 case 0x4:
1984 case 0x2:
1985 case 0x1:
1986 /* Effectively an 8-bit load. */
1987 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
1988 len = 8;
1989 goto one_insert;
1990
1991 one_insert:
1992 pos = base + ctz32(m3) * 8;
1993 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
1994 ccm = ((1ull << len) - 1) << pos;
1995 break;
1996
1997 default:
1998 /* This is going to be a sequence of loads and inserts. */
1999 pos = base + 32 - 8;
2000 ccm = 0;
2001 while (m3) {
2002 if (m3 & 0x8) {
2003 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2004 tcg_gen_addi_i64(o->in2, o->in2, 1);
2005 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2006 ccm |= 0xff << pos;
2007 }
2008 m3 = (m3 << 1) & 0xf;
2009 pos -= 8;
2010 }
2011 break;
2012 }
2013
2014 tcg_gen_movi_i64(tmp, ccm);
2015 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2016 tcg_temp_free_i64(tmp);
2017 return NO_EXIT;
2018 }
2019
/* Insert immediate: deposit in2 into in1 at the bit position and width
   encoded in the insn table data (shift = low byte, size = rest).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2027
/* INSERT PROGRAM MASK: replace bits 24-31 of out with the program mask
   (extracted from psw_mask, bits 24-27) and the condition code
   (bits 28-31).  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Shift the program-mask bits of psw_mask down into bits 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Condition code into bits 28-31.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2046
#ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged): done in the helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED (privileged): key for the address in in2.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
2062
/* LOAD LENGTHENED float32 -> float64.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED float64 -> float32.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED float128 -> float64 (input as hi/lo pair).  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED float128 -> float32 (input as hi/lo pair).  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD LENGTHENED float64 -> float128; low half via return_low128.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LENGTHENED float32 -> float128; low half via return_low128.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2100
/* LOAD 31-bit: out = in2 with bits 32-63 and bit 31 cleared.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

/* Memory loads of various widths; in2 holds the address.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2148
/* LOAD ON CONDITION: out = in2 if the m3 condition holds, else in1
   (the original destination value), implemented with movcond.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize the condition as a 0/1 value,
           widen it, and movcond against zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2178
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged): control registers r1..r3 from
   successive words at the address in in2.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit, privileged).  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS (privileged): translate the virtual address in in2;
   the helper also sets the condition code.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PSW (privileged): load a short (2 x 32-bit) PSW and install it.
   Ends the TB since the PSW (and thus control state) changes.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED (privileged): full 2 x 64-bit PSW.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
2248
/* LOAD ACCESS MULTIPLE: access registers r1..r3 from memory at in2.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2259
/* LOAD MULTIPLE (32-bit): load the low halves of registers r1..r3
   (wrapping modulo 16) from successive words at the address in in2.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE HIGH: as op_lm32 but targeting the high halves.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2303
/* LOAD MULTIPLE (64-bit): registers r1..r3 (wrapping modulo 16) from
   successive doublewords at the address in in2.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2322
/* Generic register move: steal in2 as the output (no copy emitted).
   in2 is invalidated so the generic cleanup code does not free it twice;
   the g_* flags track whether the temp is a global that must not be freed. */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* 128-bit move: steal the in1/in2 pair as the out/out2 pair, again
   transferring ownership instead of copying. */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2343
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via helper. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: helper operates on the even/odd register pairs r1 and r2
   and produces the condition code. */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as MVCL but with the pad byte taken from in2. */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2376
#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-address-space move; length comes
   from register r1 (note: r1 is read from the l1 field of the insn). */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: privileged counterpart of MVCP for the other
   direction of the address-space move. */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
2398
/* MOVE PAGE: helper copies one page; R0 supplies the operand flags. */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: helper copies until the ending byte in R0 is stored.
   Returns the two updated addresses: in1 directly, in2 via the
   low-128 return slot. */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2415
/* Integer multiply, low 64 bits only. */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* 64x64 -> 128 multiply; high half in out, low half via return_low128. */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP). */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP operands, long BFP result). */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP). */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (extended BFP): 128-bit operands split across out/out2
   and in1/in2. */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP operands, extended BFP result). */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2460
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3]. */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (long BFP): out = in1 * in2 + f[r3]. */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (short BFP): out = in1 * in2 - f[r3]. */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (long BFP): out = in1 * in2 - f[r3]. */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2490
/* LOAD NEGATIVE (integer): out = -|in2|. */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD NEGATIVE (short BFP): force the sign bit of the float32,
   which lives in bit 31 of the 64-bit freg image. */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (long BFP): force the float64 sign bit. */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (extended BFP): sign bit is in the high doubleword;
   the low doubleword is copied unchanged. */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2515
/* AND (character): storage-to-storage AND of l1+1 bytes; helper
   computes the condition code. */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2525
/* LOAD COMPLEMENT (integer): out = -in2. */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (short BFP): flip the float32 sign bit (bit 31 of
   the 64-bit freg image). */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (long BFP): flip the float64 sign bit. */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (extended BFP): flip the sign in the high doubleword,
   copy the low doubleword unchanged. */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2550
/* OR (character): storage-to-storage OR of l1+1 bytes; helper
   computes the condition code. */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* Register/immediate OR; CC handling is done by the cout hook. */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2566
/* OR IMMEDIATE (OIHH/OIHL/...): insn->data packs the field width in
   bits 8+ and the shift in the low byte.  Only the selected field is
   OR-ed and only its bits feed the condition code. */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Move the immediate into position before combining. */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2582
/* POPULATION COUNT: per-byte bit counts, computed by helper. */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
/* PURGE TLB (privileged). */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
#endif
2597
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate R2 by
   i5, then insert bits i3..i4 of the result into R1, optionally zeroing
   the rest (i4 bit 7).  The high/low variants restrict the operation to
   one 32-bit half of the register. */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;           /* "zero remaining bits" flag */
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  PMASK selects the
       register half the variant is allowed to touch. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
2680
/* ROTATE THEN {AND,OR,XOR} SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate R2
   by i5, combine bits i3..i4 with R1 using the boolean op selected by
   op2, and set the CC from the selected bits.  The T form (i3 bit 7)
   discards the result and only sets the CC. */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside MASK must be left unchanged in OUT, hence
       the differing neutral elements (1 for AND, 0 for OR/XOR). */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2733
/* Byte-reverse the low 16 bits (LRVH and friends). */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse the low 32 bits. */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse all 64 bits. */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2751
/* ROTATE LEFT SINGLE LOGICAL (32-bit): rotate is done at i32 width so
   the bits wrap within the word; the result is zero-extended to 64. */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (64-bit). */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2772
#ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED (privileged): helper sets the CC from
   the old reference/change bits. */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL FAST (privileged). */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
#endif
2790
/* SET ACCESS REGISTER: store the low 32 bits of in2 into aregs[r1]. */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}

/* SUBTRACT (short BFP). */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (long BFP). */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (extended BFP): 128-bit operands in out/out2 and in1/in2. */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2816
/* SQUARE ROOT (short BFP). */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (long BFP). */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (extended BFP): 128-bit result split across out/out2. */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2835
#ifndef CONFIG_USER_ONLY
/* SERVICE CALL (privileged): SCLP interface; helper sets the CC. */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR (privileged).  NOTE(review): the helper writes
   cc_op but no set_cc_static() follows — confirm whether the CC is
   handled elsewhere for SIGP. */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
2856
/* STORE ON CONDITION (STOC/STOCG): branch around the store when the
   m3 condition is FALSE (disas_jcc yields the inverted test).
   insn->data selects a 64-bit (nonzero) or 32-bit store. */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    int lab, r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
2885
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 or 63).  CC must be computed from the pre-shift inputs. */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* SHIFT LEFT SINGLE LOGICAL. */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic). */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL. */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2917
/* SET FPC: install in2 as the new floating-point control register. */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
2923
2924 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
2925 {
2926 int b2 = get_field(s->fields, b2);
2927 int d2 = get_field(s->fields, d2);
2928 TCGv_i64 t1 = tcg_temp_new_i64();
2929 TCGv_i64 t2 = tcg_temp_new_i64();
2930 int mask, pos, len;
2931
2932 switch (s->fields->op2) {
2933 case 0x99: /* SRNM */
2934 pos = 0, len = 2;
2935 break;
2936 case 0xb8: /* SRNMB */
2937 pos = 0, len = 3;
2938 break;
2939 case 0xb9: /* SRNMT */
2940 pos = 4, len = 3;
2941 default:
2942 tcg_abort();
2943 }
2944 mask = (1 << len) - 1;
2945
2946 /* Insert the value into the appropriate field of the FPC. */
2947 if (b2 == 0) {
2948 tcg_gen_movi_i64(t1, d2 & mask);
2949 } else {
2950 tcg_gen_addi_i64(t1, regs[b2], d2);
2951 tcg_gen_andi_i64(t1, t1, mask);
2952 }
2953 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
2954 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
2955 tcg_temp_free_i64(t1);
2956
2957 /* Then install the new FPC to set the rounding mode in fpu_status. */
2958 gen_helper_sfpc(cpu_env, t2);
2959 tcg_temp_free_i64(t2);
2960 return NO_EXIT;
2961 }
2962
#ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS (privileged): bits 56-59 of the second
   operand address become the PSW key. */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED (privileged). */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK (privileged): replace the top 8 bits of the PSW. */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CPU ADDRESS (privileged). */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
2995
/* STORE CLOCK: helper produces the 64-bit TOD value. */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: store the TOD as a 16-byte value. */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3025
/* SET CLOCK COMPARATOR (privileged). */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR (privileged). */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit, privileged): store control regs r1..r3. */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit, privileged). */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3063
/* STORE CPU ID (privileged).  NOTE(review): reuses cpu_num as the CPU
   identification word — confirm this matches the architected format. */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* SET CPU TIMER (privileged). */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST (privileged): store a fixed facility word to the
   architected low-core location 200. */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}
3091
/* STORE CPU TIMER (privileged). */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE SYSTEM INFORMATION (privileged): R0/R1 carry the function
   code and selectors; helper sets the CC. */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX (privileged). */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* Channel-subsystem instructions (privileged): not implemented, so
   report "not operational" via CC 3. */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational. */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}

/* STORE PREFIX (privileged): mask to the architected prefix bits. */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
3130
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac / STOSM, privileged):
   store the current system mask byte, then AND or OR the immediate
   into the top byte of the PSW. */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND, keeping all bits below the mask byte. */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR. */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (privileged). */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
#endif
3163
/* Store the low byte of in1 at in2. */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low halfword of in1 at in2. */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low word of in1 at in2. */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the full doubleword in1 at in2. */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3187
/* STORE ACCESS MULTIPLE: store access registers r1..r3 via helper. */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3198
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by m3,
   contiguously at in2.  insn->data is the bit offset of the 32-bit
   field being operated on (0 for STCM, 32 for STCMH). */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the lowest selected byte within in1. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3247
/* STORE MULTIPLE (STM/STMG): store registers r1..r3, wrapping 15 -> 0;
   insn->data is the element size (4 or 8 bytes). */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3271
/* STORE MULTIPLE HIGH: store the high 32-bit halves of r1..r3 by
   shifting each register left 32 before the 32-bit store (the store
   uses the low word of the shifted temp). */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
3295
/* SEARCH STRING: helper scans for the byte in R0; updated addresses
   come back in in1 and (via the low-128 slot) in2. */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* Integer subtract; CC via the cout hook. */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3310
/* SUBTRACT WITH BORROW: computed as in1 + ~in2 + borrow, where the
   incoming borrow is bit 1 of the current condition code. */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Extract the carry/borrow flag from bit 1 of the CC value. */
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3328
/* SUPERVISOR CALL: record the SVC number and instruction length for
   the interrupt handler, then raise the exception.  Ends the TB. */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    /* ILEN is derived from the actual instruction length. */
    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3347
/* TEST DATA CLASS (short BFP): CC from class test of in1 against in2. */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (long BFP). */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (extended BFP): 128-bit operand in out/out2. */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3368
#ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: helper probes accessibility and sets the CC.
   NOTE(review): no check_privileged() here, unlike its neighbours —
   confirm TPROT's privilege handling is done elsewhere. */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
3378
/* TRANSLATE: replace l1+1 bytes at addr1 via the table at in2. */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK: convert packed-decimal at in2 to zoned at addr1. */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3397
/* EXCLUSIVE OR (character): storage-to-storage XOR of l1+1 bytes;
   helper computes the condition code. */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* Register/immediate XOR; CC via the cout hook. */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3413
/* EXCLUSIVE OR IMMEDIATE (XIHF/XILF...): insn->data packs field width
   (bits 8+) and shift (low byte), mirroring op_ori. */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Move the immediate into position before combining. */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3429
/* Produce a zero output (e.g. for LOAD ZERO style insns). */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a 128-bit zero: out2 aliases out, so mark it global-like
   (g_out2) to prevent a double free of the shared temp. */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3443
3444 /* ====================================================================== */
3445 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3446 the original inputs), update the various cc data structures in order to
3447 be able to compute the new condition code. */
3448
/* CC after ABSOLUTE VALUE (32-bit). */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

/* CC after ABSOLUTE VALUE (64-bit). */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* CC after signed 32-bit add: needs both inputs and the result. */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

/* CC after signed 64-bit add. */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* CC after unsigned (logical) 32-bit add. */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

/* CC after unsigned (logical) 64-bit add. */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* CC after 32-bit add-with-carry. */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

/* CC after 64-bit add-with-carry. */
static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* CC after signed 32-bit compare. */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

/* CC after signed 64-bit compare. */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* CC after unsigned 32-bit compare. */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

/* CC after unsigned 64-bit compare. */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* CC from a short-BFP result. */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

/* CC from a long-BFP result. */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

/* CC from an extended-BFP result (both halves). */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* CC after LOAD NEGATIVE (32-bit). */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

/* CC after LOAD NEGATIVE (64-bit). */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* CC after LOAD COMPLEMENT (32-bit). */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

/* CC after LOAD COMPLEMENT (64-bit). */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* CC = zero/nonzero of the low 32 bits of the result. */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}
3549
3550 static void cout_nz64(DisasContext *s, DisasOps *o)
3551 {
3552 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3553 }
3554
3555 static void cout_s32(DisasContext *s, DisasOps *o)
3556 {
3557 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3558 }
3559
3560 static void cout_s64(DisasContext *s, DisasOps *o)
3561 {
3562 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3563 }
3564
3565 static void cout_subs32(DisasContext *s, DisasOps *o)
3566 {
3567 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3568 }
3569
3570 static void cout_subs64(DisasContext *s, DisasOps *o)
3571 {
3572 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3573 }
3574
3575 static void cout_subu32(DisasContext *s, DisasOps *o)
3576 {
3577 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3578 }
3579
3580 static void cout_subu64(DisasContext *s, DisasOps *o)
3581 {
3582 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3583 }
3584
3585 static void cout_subb32(DisasContext *s, DisasOps *o)
3586 {
3587 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3588 }
3589
3590 static void cout_subb64(DisasContext *s, DisasOps *o)
3591 {
3592 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3593 }
3594
3595 static void cout_tm32(DisasContext *s, DisasOps *o)
3596 {
3597 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3598 }
3599
3600 static void cout_tm64(DisasContext *s, DisasOps *o)
3601 {
3602 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3603 }
3604
3605 /* ====================================================================== */
3606 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3607 with the TCG register to which we will write. Used in combination with
3608 the "wout" generators, in some cases we need a new temporary, and in
3609 some cases we can write to a TCG global. */
3610
/* Allocate a fresh temp for the single output.  The SPEC_prep_* macros
   paired with each generator declare the specification-exception checks
   (e.g. even register pair) that translate_one must perform.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate fresh temps for an output pair.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into general register r1 (a TCG global).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1/r1+1;
   requires r1 even per the architecture.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into floating-point register r1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write directly into the 128-bit FP register pair r1/r1+2;
   r1 must name a valid extended-format pair.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3655
3656 /* ====================================================================== */
3657 /* The "Write OUTput" generators. These generally perform some non-trivial
3658 copy of data to TCG globals, or to main memory. The trivial cases are
3659 generally handled by having a "prep" generator install the TCG global
3660 as the destination of the operation. */
3661
/* Store the full 64-bit result into general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert the low 8 bits of the result into r1, preserving the rest.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert the low 16 bits of the result into r1, preserving the rest.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the low 32 bits of the result into r1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store the low 32 bits of out/out2 into the even/odd pair r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split one 64-bit result across the pair: low half to r1+1, high
   half to r1.  Note o->out is clobbered by the shift.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store a short (32-bit) FP result into FP register r1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a long (64-bit) FP result into FP register r1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
3716
3717 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3718 {
3719 int f1 = get_field(s->fields, r1);
3720 store_freg(f1, o->out);
3721 store_freg(f1 + 2, o->out2);
3722 }
3723 #define SPEC_wout_x1 SPEC_r1_f128
3724
/* Store the 32-bit result into r1 only when r1 != r2 (used by insns
   where r1 == r2 makes the move a no-op).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Store the short FP result into f1 only when r1 != r2.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the low byte of the result to memory at addr1.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

/* Store the low halfword of the result to memory at addr1.  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

/* Store the low word of the result to memory at addr1.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

/* Store the full doubleword result to memory at addr1.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store the low word of the result to memory at the address held in
   in2 (the second-operand address).  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0
3770
3771 /* ====================================================================== */
3772 /* The "INput 1" generators. These load the first operand to an insn. */
3773
/* Load general register r1 into a fresh temp.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Use general register r1 in place (TCG global; not freed).  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Load r1 sign-extended from 32 bits.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Load r1 zero-extended from 32 bits.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* Load the high 32 bits of r1 (shifted down into the low half).  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Load the odd register of the pair (r1+1); r1 must be even.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

/* Load r1+1 sign-extended from 32 bits.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

/* Load r1+1 zero-extended from 32 bits.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Assemble a 64-bit value from the 32-bit pair: r1 supplies the high
   half, r1+1 the low half.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

/* Load general register r2 into a fresh temp.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

/* Load general register r3 into a fresh temp.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

/* Use general register r3 in place (TCG global; not freed).  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

/* Load r3 sign-extended from 32 bits.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

/* Load r3 zero-extended from 32 bits.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* Load the short (32-bit) FP value from FP register r1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* Use FP register r1 in place (TCG global; not freed).  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0
3881
/* 128-bit FP operand in register pair r1/r1+2, used in place.
   NOTE(review): despite the in1_* name this fills o->out/o->out2, not
   o->in1/o->in2 — presumably for f128 ops that read and write the same
   register pair, so the op helper accesses the value via out/out2.
   Verify against the insn-data.def users before changing.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128
3890
/* Use FP register r3 in place (TCG global; not freed).  */
static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Compute the first-operand effective address from b1+d1 (no index).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Compute an effective address from the second-operand fields
   (x2+b2+d2) but place it in addr1.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Load an unsigned byte from the first-operand address.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

/* Load a signed halfword from the first-operand address.  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

/* Load an unsigned halfword from the first-operand address.  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

/* Load a signed word from the first-operand address.  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

/* Load an unsigned word from the first-operand address.  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

/* Load a doubleword from the first-operand address.  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
3958
3959 /* ====================================================================== */
3960 /* The "INput 2" generators. These load the second operand to an insn. */
3961
/* Use general register r1 in place as the second operand.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Load r1 zero-extended from 16 bits.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Load r1 zero-extended from 32 bits.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Load general register r2 into a fresh temp.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* Use general register r2 in place (TCG global; not freed).  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when non-zero; r2 == 0 conventionally means "no
   operand", in which case in2 is deliberately left unset for the op
   helper to test.  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Load r2 sign-extended from 8 bits.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

/* Load r2 zero-extended from 8 bits.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

/* Load r2 sign-extended from 16 bits.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

/* Load r2 zero-extended from 16 bits.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

/* Load general register r3 into a fresh temp.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* Load r2 sign-extended from 32 bits.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

/* Load r2 zero-extended from 32 bits.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* Load the short (32-bit) FP value from FP register r2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Use FP register r2 in place (TCG global; not freed).  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit FP operand in register pair r2/r2+2: the high half goes in
   in1 and the low half in in2, both used in place.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Treat register r2 as a base and form an address from it alone.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Compute the second-operand effective address from x2+b2+d2.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: current PC plus the halfword-scaled i2 field.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift amount for a 32-bit shift (masked to 0-31).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

/* Shift amount for a 64-bit shift (masked to 0-63).  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

/* Load an unsigned byte from the second-operand address.  Note the
   address temp in in2 is reused to hold the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

/* Load a signed halfword from the second-operand address.  */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

/* Load an unsigned halfword from the second-operand address.  */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

/* Load a signed word from the second-operand address.  */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

/* Load an unsigned word from the second-operand address.  */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

/* Load a doubleword from the second-operand address.  */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* Load an unsigned halfword from a PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

/* Load a signed word from a PC-relative address.  */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

/* Load an unsigned word from a PC-relative address.  */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

/* Load a doubleword from a PC-relative address.  */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Immediate i2 as-is (already sign-extended by field extraction).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* Immediate i2 truncated/zero-extended to 8 bits.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

/* Immediate i2 truncated/zero-extended to 16 bits.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

/* Immediate i2 truncated/zero-extended to 32 bits.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* 16-bit immediate shifted left by insn->data bits (e.g. for the
   IIHH/IIHL family, which target different halfwords).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* 32-bit immediate shifted left by insn->data bits.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4213
4214 /* ====================================================================== */
4215
4216 /* Find opc within the table of insns. This is formulated as a switch
4217 statement so that (1) we get compile-time notice of cut-paste errors
4218 for duplicated opcodes, and (2) the compiler generates the binary
4219 search tree, rather than us having to post-process the table. */
4220
/* C() is D() without per-insn data: the common case.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: an enum constant per insn, used to
   index insn_info[] from lookup_opc.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: build the DisasInsn descriptor for each insn,
   wiring up the helper generators selected by the def file columns.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: a switch case per opcode, so the compiler both
   rejects duplicated opcodes and builds the binary search tree.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a (possibly combined) 16-bit opcode to its descriptor, or NULL
   for an unknown/unimplemented opcode.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4278
4279 /* Extract a field from the insn. The INSN should be left-aligned in
4280 the uint64_t so that we can more easily utilize the big-bit-endian
4281 definitions we extract from the Principals of Operation. */
4282
4283 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4284 {
4285 uint32_t r, m;
4286
4287 if (f->size == 0) {
4288 return;
4289 }
4290
4291 /* Zero extract the field from the insn. */
4292 r = (insn << f->beg) >> (64 - f->size);
4293
4294 /* Sign-extend, or un-swap the field as necessary. */
4295 switch (f->type) {
4296 case 0: /* unsigned */
4297 break;
4298 case 1: /* signed */
4299 assert(f->size <= 32);
4300 m = 1u << (f->size - 1);
4301 r = (r ^ m) - m;
4302 break;
4303 case 2: /* dl+dh split, signed 20 bit. */
4304 r = ((int8_t)r << 12) | (r >> 8);
4305 break;
4306 default:
4307 abort();
4308 }
4309
4310 /* Validate that the "compressed" encoding we selected above is valid.
4311 I.e. we havn't make two different original fields overlap. */
4312 assert(((o->presentC >> f->indexC) & 1) == 0);
4313 o->presentC |= 1 << f->indexC;
4314 o->presentO |= 1 << f->indexO;
4315
4316 o->c[f->indexC] = r;
4317 }
4318
4319 /* Lookup the insn at the current PC, extracting the operands into O and
4320 returning the info struct for the insn. Returns NULL for invalid insn. */
4321
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.
   Also sets s->next_pc from the instruction length encoded in the first
   opcode byte.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes always contain the primary opcode, from which
       the total instruction length (2, 4 or 6 bytes) is derived.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full instruction in the 64-bit INSN so that field
       positions from the Principles of Operation can be used directly.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4407
/* Translate one guest instruction at s->pc: decode it, run the helper
   generator pipeline (in1/in2 -> prep -> op -> wout -> cout), free the
   temporaries, and advance the PC.  Returns the exit disposition for
   the translation loop.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions.  The SPEC_* bits were
       accumulated from the per-generator macros at table-build time.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            /* Valid 128-bit FP pairs are 0/2, 1/3, ..., 13/15.  */
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; NULL helpers
       (encoded as 0 in insn-data.def) are skipped.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  Fields flagged g_*
       alias TCG globals and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4511
/* Translate a basic block starting at tb->pc into TCG ops.  With
   SEARCH_PC set, also record per-opcode PC/cc_op/icount data used by
   restore_state_to_opc for exception unwinding.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record the mapping from opcode index to guest state,
               zero-filling indices skipped since the last insn.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* Stop before translating an insn that has a breakpoint set.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The TB epilogue has already been emitted.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the tail of the opcode-index mapping.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4645
/* Public entry point: translate a TB without per-opcode PC tracking.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
4650
/* Public entry point: translate a TB recording per-opcode PC data for
   later state restoration (see restore_state_to_opc).  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4655
4656 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4657 {
4658 int cc_op;
4659 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4660 cc_op = gen_opc_cc_op[pc_pos];
4661 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4662 env->cc_op = cc_op;
4663 }
4664 }