]> git.proxmox.com Git - mirror_qemu.git/blob - target-s390x/translate.c
target-s390: Optimize emitting discards
[mirror_qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;    /* TB being translated */
    const DisasInsn *insn;          /* decode-table entry for the current insn */
    DisasFields *fields;            /* decoded operand fields of the current insn */
    uint64_t pc, next_pc;           /* address of this insn and of the next one */
    enum cc_op cc_op;               /* symbolic cc state tracked at translate time */
    bool singlestep_enabled;
};
59
/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;     /* true -> compare u.s64, false -> compare u.s32 */
    bool g1;        /* operand a is a global; free_compare must not free it */
    bool g2;        /* operand b is a global; free_compare must not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
71
72 #define DISAS_EXCP 4
73
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
77 #endif
78
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
80 {
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
84 }
85 }
86 return pc;
87 }
88
89 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
90 int flags)
91 {
92 int i;
93
94 if (env->cc_op > 3) {
95 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
96 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
97 } else {
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
99 env->psw.mask, env->psw.addr, env->cc_op);
100 }
101
102 for (i = 0; i < 16; i++) {
103 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
104 if ((i % 4) == 3) {
105 cpu_fprintf(f, "\n");
106 } else {
107 cpu_fprintf(f, " ");
108 }
109 }
110
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
113 if ((i % 4) == 3) {
114 cpu_fprintf(f, "\n");
115 } else {
116 cpu_fprintf(f, " ");
117 }
118 }
119
120 #ifndef CONFIG_USER_ONLY
121 for (i = 0; i < 16; i++) {
122 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
123 if ((i % 4) == 3) {
124 cpu_fprintf(f, "\n");
125 } else {
126 cpu_fprintf(f, " ");
127 }
128 }
129 #endif
130
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i = 0; i < CC_OP_MAX; i++) {
133 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
134 inline_branch_miss[i], inline_branch_hit[i]);
135 }
136 #endif
137
138 cpu_fprintf(f, "\n");
139 }
140
/* TCG globals mapping the architectural state used by generated code.  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

static TCGv_i32 cc_op;      /* current symbolic cc operation (enum cc_op) */
static TCGv_i64 cc_src;     /* deferred inputs of the cc computation */
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];   /* "r0".."r15" then "f0".."f15" */
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* Per-op cc_op values recorded for restore after an exception.  */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
154
/* One-time allocation of the TCG globals that back the CPU state,
   plus registration of the helper functions.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* The register names live in a static buffer because TCG keeps
       only the pointer, not a copy.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
194
/* Return a fresh temp holding the value of general register REG.  */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temp with the 32-bit value of FP register REG in its
   low half.  Short (32-bit) FP values live in the high half of the
   64-bit register, hence the shift.  */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

/* Store V into general register REG (all 64 bits).  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into FP register REG (all 64 bits).  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of register REG.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store a 32-bit FP value (low half of V) into the high half of FP
   register REG, preserving the low half.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low 64 bits of a 128-bit helper result from env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
239
/* Write the translate-time PC back to the architectural psw.addr.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Materialize the symbolic cc_op into the env copy, unless it is
   already there (STATIC) or unknown at translate time (DYNAMIC).  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Called before operations that may fault, so that the exception path
   sees a consistent psw.addr and cc state.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
258
/* Fetch 2, 4 or 6 bytes of instruction text at PC, zero-extended to
   64 bits.  ld_code6 places the first halfword in bits 32..47.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
273
274 static int get_mem_index(DisasContext *s)
275 {
276 switch (s->tb->flags & FLAG_MASK_ASC) {
277 case PSW_ASC_PRIMARY >> 32:
278 return 0;
279 case PSW_ASC_SECONDARY >> 32:
280 return 1;
281 case PSW_ASC_HOME >> 32:
282 return 2;
283 default:
284 tcg_abort();
285 break;
286 }
287 }
288
289 static void gen_exception(int excp)
290 {
291 TCGv_i32 tmp = tcg_const_i32(excp);
292 gen_helper_exception(cpu_env, tmp);
293 tcg_temp_free_i32(tmp);
294 }
295
/* Raise program exception CODE for the current instruction: record the
   code and instruction length in the env, advance the PSW past the
   insn, flush the cc state, and trap.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length, from the distance to the next insn.  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}

/* Report an illegal or unimplemented opcode.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}
324
325 static inline void check_privileged(DisasContext *s)
326 {
327 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
328 gen_program_exception(s, PGM_PRIVILEGED);
329 }
330 }
331
/* Compute the effective address formed by base register B2, index
   register X2 and displacement D2 (any of which may be 0 = absent),
   applying 31-bit truncation when not in 64-bit addressing mode.
   Returns a new temporary owned by the caller.  */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
369
370 static inline bool live_cc_data(DisasContext *s)
371 {
372 return (s->cc_op != CC_OP_DYNAMIC
373 && s->cc_op != CC_OP_STATIC
374 && s->cc_op > 3);
375 }
376
/* Set the cc to the constant VAL (0-3).  When the previous cc_op left
   live values in cc_src/cc_dst/cc_vr, emit discards so that the TCG
   optimizer can eliminate the now-dead stores that produced them.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Set the cc to OP, computed later from DST alone.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Set the cc to OP, computed later from SRC and DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Set the cc to OP, computed later from SRC, DST and VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* Convenience wrappers for the common one-operand cc updates.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
447
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);

    /* First pass: allocate the extra helper arguments needed for this
       cc_op.  Ops taking fewer than three data arguments pad the
       unused slots with a zero constant.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed, or the op number comes from the env.  */
        break;
    }

    /* Second pass: emit the computation itself.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
555
/* Return true when a direct goto_tb to DEST is permissible: DEST must
   lie on one of the (at most two) pages the current TB occupies, and
   TB chaining is disabled under single-stepping and when the TB ends
   in an I/O instruction.  */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}

/* Branch-inlining statistics; compiled out unless DEBUG_INLINE_BRANCHES.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
578
/* Table of mask values to comparison codes, given a comparison as input.
   The index is the 4-bit branch mask: bit 8 selects cc 0 (equal),
   bit 4 cc 1 (low), bit 2 cc 2 (high), bit 1 cc 3.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
605
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask of all ones or all zeroes: branch always / never, regardless
       of the cc.  Use the cc_op global as a dummy operand pair.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:             /* cc 0: selected bits all zero */
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:     /* cc != 0: some selected bit set */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:             /* cc 0: inserted value zero */
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The cc value is already in cc_op; test MASK against it.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
828
829 static void free_compare(DisasCompare *c)
830 {
831 if (!c->g1) {
832 if (c->is_64) {
833 tcg_temp_free_i64(c->u.s64.a);
834 } else {
835 tcg_temp_free_i32(c->u.s32.a);
836 }
837 }
838 if (!c->g2) {
839 if (c->is_64) {
840 tcg_temp_free_i64(c->u.s64.b);
841 } else {
842 tcg_temp_free_i32(c->u.s32.b);
843 }
844 }
845 }
846
847 /* ====================================================================== */
848 /* Define the insn format enumeration. */
849 #define F0(N) FMT_##N,
850 #define F1(N, X1) F0(N)
851 #define F2(N, X1, X2) F0(N)
852 #define F3(N, X1, X2, X3) F0(N)
853 #define F4(N, X1, X2, X3, X4) F0(N)
854 #define F5(N, X1, X2, X3, X4, X5) F0(N)
855
856 typedef enum {
857 #include "insn-format.def"
858 } DisasFormat;
859
860 #undef F0
861 #undef F1
862 #undef F2
863 #undef F3
864 #undef F4
865 #undef F5
866
867 /* Define a structure to hold the decoded fields. We'll store each inside
868 an array indexed by an enum. In order to conserve memory, we'll arrange
869 for fields that do not exist at the same time to overlap, thus the "C"
870 for compact. For checking purposes there is an "O" for original index
871 as well that will be applied to availability bitmaps. */
872
/* Original field indices, one per distinct field name; used for the
   presence bitmap (presentO).  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact storage indices: fields that never coexist in one format
   share a slot, so only NUM_C_FIELD slots are needed.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

/* Decoded operand fields of one instruction.  */
struct DisasFields {
    unsigned op:8;              /* primary opcode byte */
    unsigned op2:8;             /* secondary opcode, where the format has one */
    unsigned presentC:16;       /* bitmap of filled compact slots */
    unsigned int presentO;      /* bitmap of present original field indices */
    int c[NUM_C_FIELD];         /* field values, indexed by DisasFieldIndexC */
};
934
935 /* This is the way fields are to be accessed out of DisasFields. */
936 #define have_field(S, F) have_field1((S), FLD_O_##F)
937 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
938
939 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
940 {
941 return (f->presentO >> c) & 1;
942 }
943
944 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
945 enum DisasFieldIndexC c)
946 {
947 assert(have_field1(f, o));
948 return f->c[c];
949 }
950
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field in the insn */
    unsigned int size:8;        /* width in bits */
    unsigned int type:2;        /* extraction kind (0/1/2; see the decoder) */
    unsigned int indexC:6;      /* compact storage slot */
    enum DisasFieldIndexO indexO:8;     /* original field index */
} DisasField;

/* Field layout for one instruction format.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
963
964 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
965 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
966 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
967 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
968 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
969 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
970 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
971 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
972 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
973 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
974 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
975 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
976 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
977 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
978
979 #define F0(N) { { } },
980 #define F1(N, X1) { { X1 } },
981 #define F2(N, X1, X2) { { X1, X2 } },
982 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
983 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
984 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
985
986 static const DisasFormatInfo format_info[] = {
987 #include "insn-format.def"
988 };
989
990 #undef F0
991 #undef F1
992 #undef F2
993 #undef F3
994 #undef F4
995 #undef F5
996 #undef R
997 #undef M
998 #undef BD
999 #undef BXD
1000 #undef BDL
1001 #undef BXDL
1002 #undef I
1003 #undef L
1004
1005 /* Generally, we'll extract operands into this structures, operate upon
1006 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1007 of routines below for more details. */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;   /* operand is a TCG global; do not free */
    TCGv_i64 out, out2, in1, in2;       /* outputs and inputs of the operation */
    TCGv_i64 addr1;                     /* effective-address operand, if any */
} DisasOps;
1013
1014 /* Instructions can place constraints on their operands, raising specification
1015 exceptions if they are violated. To make this easy to automate, each "in1",
1016 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1017 of the following, or 0. To make this easy to document, we'll put the
1018 SPEC_<name> defines next to <name>. */
1019
1020 #define SPEC_r1_even 1
1021 #define SPEC_r2_even 2
1022 #define SPEC_r1_f128 4
1023 #define SPEC_r2_f128 8
1024
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;

/* Architectural facility an instruction may require.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;

/* One entry of the instruction decode table.  The help_* callbacks
   split an insn into operand-load, operate, and write-back phases;
   any of them may be NULL.  */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:6;
    DisasFacility fac:6;
    unsigned spec:4;        /* SPEC_* operand constraints */

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
1083
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */
1086
/* Load a shift count into o->in2: the effective address D2(B2) masked
   with MASK (presumably 63 or 31 depending on operand width -- see
   callers).  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        /* No base register: the count is just the displacement.  */
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
1100
/* Emit an unconditional branch to the known address DEST, preferring a
   chained goto_tb when permitted.  Branching to the next sequential
   insn is a no-op.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1117
/* Emit a (conditional) branch.  C is the evaluated condition; the
   destination is S->PC + 2*IMM when IS_IMM, else the register value
   CDEST.  Frees C before returning.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* 32-bit compare: widen the setcond result to select with a
               64-bit movcond against zero.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1242
1243 /* ====================================================================== */
1244 /* The operations. These perform the bulk of the work for any insn,
1245 usually after the operands have been loaded and output initialized. */
1246
/* Absolute value of a 64-bit integer, via helper.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}
1252
/* Absolute value of a 32-bit float: clear the sign bit (bit 31).  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
1258
/* Absolute value of a 64-bit float: clear the sign bit (bit 63).  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
1264
/* Absolute value of a 128-bit float: clear the sign bit in the high
   doubleword, copy the low doubleword unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
1271
/* 64-bit integer addition; CC computation is handled by the insn table.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1277
/* Add with carry: out = in1 + in2 + carry, where the carry is bit 1 of
   the condition code computed for the plain addition.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Extract the carry flag from bit 1 of the CC value.  */
    tcg_gen_shri_i64(cc, cc, 1);

    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
1294
/* ADD (short BFP), via helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1300
/* ADD (long BFP), via helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1306
/* ADD (extended BFP).  The 128-bit result comes back split: high half
   in o->out, low half retrieved via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1313
/* 64-bit bitwise AND; CC computation is handled by the insn table.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1319
/* AND immediate against one 8/16/32-bit field of the register.
   insn->data encodes the field: low byte = shift, high bits = size.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Position the immediate and fill all other bits with 1s so the
       AND leaves the rest of the register untouched.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1336
/* BRANCH AND SAVE: store link information in R1, then branch to the
   address in in2.  An unused in2 (e.g. BASR with R2=0) means no branch.  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
1347
/* BRANCH RELATIVE AND SAVE: store link info in R1, branch to
   PC + 2*I2 (halfword-scaled immediate).  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1353
/* BRANCH ON CONDITION: evaluate the M1 mask against the current CC and
   hand the (possibly immediate) destination to the common branch helper.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1364
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of R1 and branch
   if the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write back only the low 32 bits of R1.  */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1388
/* BRANCH ON COUNT (64-bit): decrement R1 in place and branch if the
   result is non-zero.  c.g1 is true because c.u.s64.a aliases the
   global register, which must not be freed by free_compare.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1407
/* BRANCH ON INDEX (32-bit, BXH/BXLE): R1 += R3, then compare the new
   R1 against the comparand in R3|1.  insn->data selects LE (BXLE) vs
   GT (BXH).  Note the comparand is truncated before R1 is stored, so
   the r1 == (r3|1) case uses the pre-update value, as required.  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1433
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): R1 += R3, compare against
   R3|1.  If R1 is the same register as the comparand, snapshot the
   comparand into a temp first so the pre-update value is compared.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* The add below would clobber the comparand: copy it first.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1459
/* COMPARE AND BRANCH family: compare in1 with in2 per the M3 condition
   mask, then branch to either an immediate (RIE format) or a computed
   base+displacement address.  insn->data selects unsigned compare.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    /* Bit 3 of the m3 field is reserved and should be zero.
       Choose to ignore it wrt the ltgt_cond table above. */
    c.cond = ltgt_cond[m3 & 14];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Indirect form: destination is B4(D4).  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1487
/* COMPARE (short BFP): helper sets cc_op directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1494
/* COMPARE (long BFP): helper sets cc_op directly.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1501
/* COMPARE (extended BFP): helper sets cc_op directly.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1508
/* CONVERT TO FIXED (short BFP to 32-bit), rounding mode in M3.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1517
/* CONVERT TO FIXED (long BFP to 32-bit), rounding mode in M3.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1526
/* CONVERT TO FIXED (extended BFP to 32-bit), rounding mode in M3.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1535
/* CONVERT TO FIXED (short BFP to 64-bit), rounding mode in M3.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1544
/* CONVERT TO FIXED (long BFP to 64-bit), rounding mode in M3.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1553
/* CONVERT TO FIXED (extended BFP to 64-bit), rounding mode in M3.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1562
/* CONVERT TO LOGICAL (short BFP to unsigned 32-bit), rounding in M3.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1571
/* CONVERT TO LOGICAL (long BFP to unsigned 32-bit), rounding in M3.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1580
/* CONVERT TO LOGICAL (extended BFP to unsigned 32-bit), rounding in M3.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1589
/* CONVERT TO LOGICAL (short BFP to unsigned 64-bit), rounding in M3.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1598
/* CONVERT TO LOGICAL (long BFP to unsigned 64-bit), rounding in M3.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1607
/* CONVERT TO LOGICAL (extended BFP to unsigned 64-bit), rounding in M3.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1616
/* CONVERT FROM FIXED (64-bit to short BFP), rounding mode in M3.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1624
/* CONVERT FROM FIXED (64-bit to long BFP), rounding mode in M3.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1632
/* CONVERT FROM FIXED (64-bit to extended BFP); low half of the 128-bit
   result comes back via return_low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1641
/* CONVERT FROM LOGICAL (unsigned 64-bit to short BFP), rounding in M3.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1649
/* CONVERT FROM LOGICAL (unsigned 64-bit to long BFP), rounding in M3.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1657
/* CONVERT FROM LOGICAL (unsigned 64-bit to extended BFP); low half of
   the 128-bit result via return_low128.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1666
/* CHECKSUM: helper computes the checksum and the number of bytes it
   consumed; advance the R2 address and shrink the R2+1 length by that
   count, as the architecture makes the instruction interruptible.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1683
/* COMPARE LOGICAL (character): for power-of-two lengths up to 8, inline
   two loads and an unsigned compare; otherwise fall back to the helper.
   Note L1 holds length-1, hence the switch on l + 1.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: byte-wise compare in the helper.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1717
/* COMPARE LOGICAL LONG EXTENDED, via helper; CC comes from the helper.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
1729
/* COMPARE LOGICAL CHARACTERS UNDER MASK, via helper; CC from helper.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    /* Helper takes the register value as 32 bits.  */
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1742
/* COMPARE LOGICAL STRING, via helper.  The helper returns the updated
   first address in o->in1 and the second via return_low128; the
   terminator character is taken from R0.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1751
/* COPY SIGN: out = (sign bit of in1) | (magnitude of in2).  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
1761
/* COMPARE AND SWAP (32-bit), via helper; CC from helper.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
1770
/* COMPARE AND SWAP (64-bit), via helper; CC from helper.  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
1779
1780 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged), via helper; CC from helper.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
1790 #endif
1791
/* COMPARE DOUBLE AND SWAP (32-bit pair): build the 64-bit swap value
   from the even/odd register pair R3:R3+1 and reuse the 64-bit CSG
   helper; CC from helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}
1803
/* COMPARE DOUBLE AND SWAP (64-bit pair), via helper; CC from helper.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
1814
/* CONVERT TO DECIMAL: helper converts the 32-bit value to packed
   decimal; store the 8-byte result at the second-operand address.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1826
/* COMPARE AND TRAP: branch past the trap when the (inverted) condition
   holds; otherwise set DXC and raise a data exception.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    /* Bit 3 of the m3 field is reserved and should be zero.
       Choose to ignore it wrt the ltgt_cond table above. */
    c = tcg_invert_cond(ltgt_cond[m3 & 14]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    /* Inverted condition: branch around the trap when no trap is due.  */
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap. */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1855
1856 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged hypervisor call), via helper; the function code
   is taken from the low 12 bits of the displacement.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
1870 #endif
1871
/* DIVIDE (signed 32-bit): remainder in o->out2, quotient via
   return_low128 into o->out.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
1878
/* DIVIDE LOGICAL (unsigned 32-bit): remainder in o->out2, quotient
   via return_low128 into o->out.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
1885
/* DIVIDE (signed 64-bit): remainder in o->out2, quotient via
   return_low128 into o->out.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
1892
/* DIVIDE LOGICAL (unsigned 128/64): dividend is the out:out2 pair;
   remainder in o->out2, quotient via return_low128 into o->out.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
1899
/* DIVIDE (short BFP), via helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1905
/* DIVIDE (long BFP), via helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1911
/* DIVIDE (extended BFP): 128-bit result split across o->out and
   return_low128 into o->out2.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1918
/* EXTRACT ACCESS REGISTER: load access register R2 into the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
1925
/* EXTRACT FPC: load the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
1931
/* EXECUTE: run the target instruction (possibly modified by R1) via a
   helper interpreter; CC comes back from the helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    /* The helper may fault or branch: synchronize PC and CC first.  */
    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
1957
/* FIND LEFTMOST ONE: R1 = leading-zero count (64 if input is zero),
   R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
1977
1978 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
1979 {
1980 int m3 = get_field(s->fields, m3);
1981 int pos, len, base = s->insn->data;
1982 TCGv_i64 tmp = tcg_temp_new_i64();
1983 uint64_t ccm;
1984
1985 switch (m3) {
1986 case 0xf:
1987 /* Effectively a 32-bit load. */
1988 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
1989 len = 32;
1990 goto one_insert;
1991
1992 case 0xc:
1993 case 0x6:
1994 case 0x3:
1995 /* Effectively a 16-bit load. */
1996 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
1997 len = 16;
1998 goto one_insert;
1999
2000 case 0x8:
2001 case 0x4:
2002 case 0x2:
2003 case 0x1:
2004 /* Effectively an 8-bit load. */
2005 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2006 len = 8;
2007 goto one_insert;
2008
2009 one_insert:
2010 pos = base + ctz32(m3) * 8;
2011 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2012 ccm = ((1ull << len) - 1) << pos;
2013 break;
2014
2015 default:
2016 /* This is going to be a sequence of loads and inserts. */
2017 pos = base + 32 - 8;
2018 ccm = 0;
2019 while (m3) {
2020 if (m3 & 0x8) {
2021 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2022 tcg_gen_addi_i64(o->in2, o->in2, 1);
2023 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2024 ccm |= 0xff << pos;
2025 }
2026 m3 = (m3 << 1) & 0xf;
2027 pos -= 8;
2028 }
2029 break;
2030 }
2031
2032 tcg_gen_movi_i64(tmp, ccm);
2033 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2034 tcg_temp_free_i64(tmp);
2035 return NO_EXIT;
2036 }
2037
/* Insert immediate into one field of the register.  insn->data encodes
   the field: low byte = shift, high bits = size.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2045
/* INSERT PROGRAM MASK: place the program mask (from the PSW) and the
   current CC into bits 24-31 of the output register, preserving the
   remaining bits.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    /* Clear the destination byte before OR-ing in mask and CC.  */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    t1 = tcg_temp_new_i64();
    /* Extract the 4-bit program mask from the PSW into bits 24-27.  */
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Insert the CC into bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2064
2065 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged), via helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2072
/* INSERT STORAGE KEY EXTENDED (privileged), via helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2079 #endif
2080
/* LOAD LENGTHENED (short BFP to long), via helper.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2086
/* LOAD ROUNDED (long BFP to short), via helper.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2092
/* LOAD ROUNDED (extended BFP to long), via helper.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2098
/* LOAD ROUNDED (extended BFP to short), via helper.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2104
/* LOAD LENGTHENED (long BFP to extended): 128-bit result split across
   o->out and return_low128 into o->out2.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2111
/* LOAD LENGTHENED (short BFP to extended): 128-bit result split across
   o->out and return_low128 into o->out2.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2118
/* LOAD LOGICAL THIRTY ONE BITS: keep only the low 31 bits.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2124
/* Sign-extending 8-bit memory load.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2130
/* Zero-extending 8-bit memory load.  */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2136
/* Sign-extending 16-bit memory load.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2142
/* Zero-extending 16-bit memory load.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2148
/* Sign-extending 32-bit memory load.  */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2154
/* Zero-extending 32-bit memory load.  */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2160
/* 64-bit memory load.  */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2166
/* LOAD ON CONDITION: out = condition(M3) ? in2 : in1, emitted as a
   movcond so no branch is generated.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize the condition as 0/1, widen
           it, and select on != 0 with a 64-bit movcond.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2196
2197 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged), via helper.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2209
/* LOAD CONTROL (64-bit, privileged), via helper.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS (privileged), via helper; CC from helper.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2229
/* LOAD PSW (privileged): read the 8-byte ESA-format PSW from memory and
   install it via helper.  Ends the TB unconditionally.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2248
/* LOAD PSW EXTENDED (privileged): read the 16-byte z-format PSW from
   memory and install it via helper.  Ends the TB unconditionally.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2265 #endif
2266
/* LOAD ACCESS MULTIPLE, via helper.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2277
/* LOAD MULTIPLE (32-bit): unrolled at translation time, loading into
   the low halves of R1 through R3 (wrapping modulo 16).  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2299
/* LOAD MULTIPLE HIGH: unrolled at translation time, loading into the
   high halves of R1 through R3 (wrapping modulo 16).  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2321
/* LOAD MULTIPLE (64-bit): load successive doublewords directly into
   registers r1..r3, wrapping the register number mod 16. */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2340
/* Generic move: hand the in2 temp over to the output slot so the common
   output hook writes it back.  Clearing in2 (and its "global" flag)
   prevents the epilogue from freeing the temp twice. */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* Generic 128-bit move: transfer both input temps (in1:in2) to the
   output pair (out:out2), with the same ownership handover as op_mov2. */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2361
/* MOVE (character): memory-to-memory copy of l1+1 bytes, via helper. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: helper operates on the r1/r2 register pairs and sets CC. */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as op_mvcl but with the r3 pair and in2 operand. */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2394
2395 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged copy across address spaces; the length
   register index comes from the l1 field position of the SS format. */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: mirror of op_mvcp for the other direction. */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2415 #endif
2416
/* MOVE PAGE: helper copies a page; r0 carries the function code. */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: helper returns the updated first address in o->in1 and
   the updated second address in the low 128 return (o->in2). */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2433
/* MULTIPLY (integer, low 64 bits of the product). */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY producing a full 128-bit product in out:out2. */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP). */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP operands, long BFP result). */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP). */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (extended BFP); 128-bit operands/result in out:out2. */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP operands, extended BFP result). */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3]. */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (long BFP). */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (short BFP). */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (long BFP). */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2508
/* LOAD NEGATIVE (integer): out = -|in2|, via helper. */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD NEGATIVE (short BFP): force the sign bit (bit 31) on. */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (long BFP): force the sign bit (bit 63) on. */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (extended BFP): sign lives in the high doubleword;
   the low doubleword passes through unchanged. */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2533
/* AND (character): memory-to-memory AND of l1+1 bytes, CC from helper. */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2543
/* LOAD COMPLEMENT (integer): out = -in2. */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (short BFP): flip the sign bit (bit 31). */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (long BFP): flip the sign bit (bit 63). */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (extended BFP): sign is in the high doubleword. */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2568
/* OR (character): memory-to-memory OR of l1+1 bytes, CC from helper. */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* OR (register/register-immediate forms). */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR IMMEDIATE (OIHH etc): insn->data encodes the field's bit shift
   (low byte) and width (high byte); only the targeted field feeds CC. */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* POPULATION COUNT: per-byte bit counts, computed in a helper. */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
2606
2607 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; flushing is done entirely in the helper. */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2614 #endif
2615
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate the
   second operand, then merge a bit range of it into the first operand.
   All mask computation is done at translate time; only the rotate and
   the final merge are emitted. */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;   /* Z bit: zero the untouched selected bits */
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  PMASK restricts the
       operation to the half of the register the variant works on. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1. In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
2698
/* ROTATE THEN {AND,OR,XOR} SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate the
   second operand, combine the selected bit range into the first operand,
   and set CC from the selected bits of the result. */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside MASK are neutralized (forced to the
       identity element of the operation) before combining. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2751
/* Byte-reverse the low 16 bits of the operand. */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse the low 32 bits of the operand. */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse all 64 bits of the operand. */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (32-bit): done in i32 temps, result
   zero-extended into the 64-bit output. */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (64-bit). */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2790
2791 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper sets CC. */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL FAST: privileged. */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
2807 #endif
2808
/* SET ACCESS REGISTER: store the low 32 bits of in2 into aregs[r1]. */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}

/* SUBTRACT (short BFP). */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (long BFP). */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (extended BFP); 128-bit operands/result in out:out2. */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT (short BFP). */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (long BFP). */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (extended BFP). */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2853
2854 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; helper sets CC. */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged inter-CPU signalling via helper. */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2873 #endif
2874
/* STORE ON CONDITION (STOC/STOCG): conditionally store r1 to the
   b2/d2 address; insn->data selects a 64-bit vs 32-bit store.
   NOTE(review): the store executes only when the brcond does NOT
   branch, i.e. when the disas_jcc condition is false — confirm that
   disas_jcc here yields the inverse of "condition fulfilled", else
   the polarity is reversed relative to the architecture. */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    int lab, r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* Branch over the store when the comparison fires.  */
    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
2903
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit index
   (31 or 63), which also selects the CC computation. */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit. Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* SHIFT LEFT SINGLE LOGICAL. */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic). */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL. */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2935
/* SET FPC: install a new floating-point control register via helper. */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: as SFPC but may raise a simulated IEEE exception. */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
2947
2948 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
2949 {
2950 int b2 = get_field(s->fields, b2);
2951 int d2 = get_field(s->fields, d2);
2952 TCGv_i64 t1 = tcg_temp_new_i64();
2953 TCGv_i64 t2 = tcg_temp_new_i64();
2954 int mask, pos, len;
2955
2956 switch (s->fields->op2) {
2957 case 0x99: /* SRNM */
2958 pos = 0, len = 2;
2959 break;
2960 case 0xb8: /* SRNMB */
2961 pos = 0, len = 3;
2962 break;
2963 case 0xb9: /* SRNMT */
2964 pos = 4, len = 3;
2965 default:
2966 tcg_abort();
2967 }
2968 mask = (1 << len) - 1;
2969
2970 /* Insert the value into the appropriate field of the FPC. */
2971 if (b2 == 0) {
2972 tcg_gen_movi_i64(t1, d2 & mask);
2973 } else {
2974 tcg_gen_addi_i64(t1, regs[b2], d2);
2975 tcg_gen_andi_i64(t1, t1, mask);
2976 }
2977 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
2978 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
2979 tcg_temp_free_i64(t1);
2980
2981 /* Then install the new FPC to set the rounding mode in fpu_status. */
2982 gen_helper_sfpc(cpu_env, t2);
2983 tcg_temp_free_i64(t2);
2984 return NO_EXIT;
2985 }
2986
2987 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; deposit bits 4-7 of the
   second-operand address into the PSW key field. */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged, via helper. */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace PSW bits 56-63 with the operand. */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3009
/* STORE CPU ADDRESS: privileged; reads cpu_num from the env. */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number. In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* STORE CLOCK: helper yields the TOD value; CC forced to 0. */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3027
/* STORE CLOCK EXTENDED: store the clock as a 16-byte extended value. */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value. Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3049
/* SET CLOCK COMPARATOR: privileged, via helper. */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged, via helper. */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
3063
/* STORE CONTROL (64-bit): store control registers r1..r3, via helper. */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit): as op_stctg for the 32-bit form. */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3087
/* STORE CPU ID: privileged.
   NOTE(review): only cpu_num is produced here — presumably a stand-in
   for the full CPU identification; verify against the architecture. */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* SET CPU TIMER: privileged, via helper. */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
3101
/* STORE FACILITY LIST: privileged; store a fixed facility word at
   absolute address 200 (the architected STFL location). */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement. Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}

/* STORE CPU TIMER: privileged, via helper. */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
3122
/* STORE SYSTEM INFORMATION: privileged; r0/r1 carry the function and
   selector codes, helper sets CC. */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX: privileged, via helper. */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* Channel-subsystem insns: unimplemented; report "not operational". */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational. */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}
3146
/* STORE PREFIX: privileged; read the prefix register, masking it to
   the architected bit range. */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
3154
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac, STOSM): privileged.
   Store the current system mask, then AND or OR the immediate into
   PSW bits 56-63. */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3178
/* STORE USING REAL ADDRESS: privileged, via helper (may fault). */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3186 #endif
3187
/* Generic stores of in1 to the in2 address, by access size. */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3211
/* STORE ACCESS MULTIPLE: store access registers r1..r3, via helper. */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3222
/* STORE CHARACTERS UNDER MASK: store the register bytes selected by m3.
   insn->data is the bit offset of the 32-bit field within the register
   (0 for the low half, 32 for the CMH high-half variant).  Contiguous
   masks become a single sized store; other masks fall back to a
   shift-and-store-byte sequence. */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the least significant selected byte.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  Walk
           the mask from its most significant bit downward. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3271
/* STORE MULTIPLE: store registers r1..r3 (wrapping mod 16) to memory;
   insn->data is the element size (4 or 8 bytes). */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3295
3296 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3297 {
3298 int r1 = get_field(s->fields, r1);
3299 int r3 = get_field(s->fields, r3);
3300 TCGv_i64 t = tcg_temp_new_i64();
3301 TCGv_i64 t4 = tcg_const_i64(4);
3302 TCGv_i64 t32 = tcg_const_i64(32);
3303
3304 while (1) {
3305 tcg_gen_shl_i64(t, regs[r1], t32);
3306 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3307 if (r1 == r3) {
3308 break;
3309 }
3310 tcg_gen_add_i64(o->in2, o->in2, t4);
3311 r1 = (r1 + 1) & 15;
3312 }
3313
3314 tcg_temp_free_i64(t);
3315 tcg_temp_free_i64(t4);
3316 tcg_temp_free_i64(t32);
3317 return NO_EXIT;
3318 }
3319
/* SEARCH STRING: helper scans for the byte in r0; it returns the
   updated first address in o->in1 and the second via the low 128. */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3328
/* SUBTRACT (integer). */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: out = in1 + ~in2 + borrow, where the borrow
   is bit 1 of the materialized condition code. */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* The borrow flag is CC bit 1 (cc values are 0..3).  */
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3352
/* SUPERVISOR CALL: record the SVC code and instruction length in the
   env, then raise the SVC exception; the TB ends here. */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The exception entry needs an up-to-date PSW address and cc_op. */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3371
/* TEST DATA CLASS (short BFP): CC from helper. */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (long BFP). */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (extended BFP); 128-bit value in out:out2. */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3392
3393 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: CC from helper (no env argument in this helper). */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3401 #endif
3402
/* TRANSLATE: table-translate l1+1 bytes in place, via helper. */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK: convert packed decimal to zoned, via helper. */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3421
/* EXCLUSIVE OR (character): XOR of l1+1 bytes at addr1 with the bytes
   at addr2.  The common x XC x,x zeroing idiom is open-coded as inline
   stores of zero; everything else goes through the helper. */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        /* Emit the widest stores first, narrowing as length shrinks. */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* XOR with itself always yields zero, hence CC 0. */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
3475
/* EXCLUSIVE OR (register forms). */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* EXCLUSIVE OR IMMEDIATE: insn->data encodes the field's bit shift
   (low byte) and width (high byte); only that field feeds the CC. */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3497
/* Produce a constant zero result.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}
3503
/* Produce a constant zero in both output halves.  The second half aliases
   the first, so mark it global to avoid a double free in translate_one.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3511
3512 /* ====================================================================== */
3513 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3514 the original inputs), update the various cc data structures in order to
3515 be able to compute the new condition code. */
3516
/* cc from a 32-bit / 64-bit absolute-value result.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* cc from signed addition: both inputs and the result are needed to
   detect overflow.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* cc from unsigned (logical) addition.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* cc from add-with-carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* cc from signed comparison of the two inputs.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* cc from unsigned comparison of the two inputs.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* cc from a float result (32/64/128-bit).  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* cc from a negated-absolute-value result.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* cc from a two's-complement (negation) result.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* cc = (result != 0).  For the 32-bit variant, only the low 32 bits of
   the result participate, so zero-extend into cc_dst first.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* cc from a signed comparison of the result against zero.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* cc from signed subtraction.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* cc from unsigned (logical) subtraction.  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* cc from subtract-with-borrow.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* cc from TEST UNDER MASK of the two inputs.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3672
3673 /* ====================================================================== */
3674 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3675 with the TCG register to which we will write. Used in combination with
3676 the "wout" generators, in some cases we need a new temporary, and in
3677 some cases we can write to a TCG global. */
3678
/* Allocate a fresh temporary for the result.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh temporary pair for a 128-bit result.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into general register r1 (a TCG global).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1/r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into floating-point register r1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write directly into the f128 register pair r1/r1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3723
3724 /* ====================================================================== */
3725 /* The "Write OUTput" generators. These generally perform some non-trivial
3726 copy of data to TCG globals, or to main memory. The trivial cases are
3727 generally handled by having a "prep" generator install the TCG global
3728 as the destination of the operation. */
3729
/* Store the full 64-bit result into general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the low 32 bits of the result into r1, preserving the high half.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store a 32-bit pair into the even/odd registers r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd pair: low half to r1+1,
   high half to r1.  Note this clobbers o->out with the shift.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store a 32-bit float result into freg r1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a 64-bit float result into freg r1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
3784
3785 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3786 {
3787 int f1 = get_field(s->fields, r1);
3788 store_freg(f1, o->out);
3789 store_freg(f1 + 2, o->out2);
3790 }
3791 #define SPEC_wout_x1 SPEC_r1_f128
3792
/* Store the 32-bit result into r1 only when r1 and r2 differ
   (used when r1 == r2 would make the write redundant).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Likewise for a 32-bit float result.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store 1/2/4/8 bytes of the result to the first-operand address.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store 4 bytes of the result to the address held in in2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0
3838
3839 /* ====================================================================== */
3840 /* The "INput 1" generators. These load the first operand to an insn. */
3841
/* Load general register r1 into a fresh temporary.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Use general register r1 in place (no copy; marked global).  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Low 32 bits of r1, sign-extended.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Low 32 bits of r1, zero-extended.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd register of the even/odd pair named by r1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

/* Low 32 bits of r1+1, sign-extended.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

/* Low 32 bits of r1+1, zero-extended.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Combine the 32-bit halves of the even/odd pair into one 64-bit value:
   r1 supplies the high half, r1+1 the low half.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

/* Load general register r2 into a fresh temporary.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

/* Load general register r3 into a fresh temporary.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

/* Use general register r3 in place.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

/* Low 32 bits of r3, sign-extended.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

/* Low 32 bits of r3, zero-extended.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* Load the 32-bit float in freg r1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* Use freg r1 in place.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0
3949
/* Reference the f128 register pair r1/r1+2.
   NOTE(review): unlike every other in1_* generator, this writes the
   o->out/o->out2/g_out fields rather than o->in1/g_in1.  Presumably the
   f128 operand is operated on in place through the out fields — confirm
   against the insns in insn-data.def that use in1=x1 before changing.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128
3958
/* Use freg r3 in place.  */
static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Compute the first-operand effective address from b1/d1 (no index).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Compute an effective address from the *second*-operand fields
   (x2/b2/d2) but place it in addr1.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Load 1/2/4/8 bytes from the first-operand address, with the
   sign/zero extension indicated by the suffix.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4026
4027 /* ====================================================================== */
4028 /* The "INput 2" generators. These load the second operand to an insn. */
4029
/* Use general register r1 in place as the second operand.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Low 16 bits of r1, zero-extended.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Low 32 bits of r1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Load general register r2 into a fresh temporary.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* Use general register r2 in place.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when it is non-zero; otherwise leave in2 unset
   (r2 == 0 means "no register" for these insns).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Sub-word extractions of r2, sign- or zero-extended per the suffix.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

/* Load general register r3 into a fresh temporary.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* Load the 32-bit float in freg r2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Use freg r2 in place.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* An f128 second operand occupies both in1 and in2: the pair r2/r2+2.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128
4142
/* Effective address formed from register r2 alone (no base/disp).  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Effective address from x2/b2/d2; x2 may be absent in some formats.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: i2 counts halfwords from the current insn.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift amounts, masked to 31 or 63 bits as appropriate.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

/* Memory loads through the a2 effective address; the address temp in
   in2 is reused to hold the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* Memory loads through the PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
4243
/* Immediate i2, sign-extended per the field definition.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* Immediate i2, truncated/zero-extended to 8, 16 or 32 bits.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* Unsigned immediate shifted left by insn->data bits (e.g. for the
   "high halfword" immediate insns).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4281
4282 /* ====================================================================== */
4283
4284 /* Find opc within the table of insns. This is formulated as a switch
4285 statement so that (1) we get compile-time notice of cut-paste errors
4286 for duplicated opcodes, and (2) the compiler generates the binary
4287 search tree, rather than us having to post-process the table. */
4288
/* C() is D() with a zero data field -- most insns carry no extra data.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: one enumerator per insn.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: one DisasInsn initializer per insn, wiring up the
   in1/in2/prep/wout/cout/op generator functions defined above and the
   OR of their SPEC_* specification-exception requirements.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: a switch case per opcode.  The compiler checks for
   duplicate opcodes and builds the binary search tree for us.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a 16-bit (op << 8 | op2) key to its DisasInsn, or NULL.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4346
4347 /* Extract a field from the insn. The INSN should be left-aligned in
4348 the uint64_t so that we can more easily utilize the big-bit-endian
4349 definitions we extract from the Principals of Operation. */
4350
/* Extract one operand field described by F from the left-aligned INSN
   word and record it in O.  A zero-size field descriptor is a no-op.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Classic sign-extension trick: xor/subtract the sign bit.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The field was extracted as dh:dl; swap into dh-high order
           while sign-extending the 8-bit dh part.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4386
4387 /* Lookup the insn at the current PC, extracting the operands into O and
4388 returning the info struct for the insn. Returns NULL for invalid insn. */
4389
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes determine the instruction length (2/4/6).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the 64-bit word so field descriptors
       can use big-bit-endian positions straight from the PoO.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* 4-bit secondary opcode in bits 12-15.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* SS-format insns have no secondary opcode.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4475
/* Translate a single instruction at s->pc: look it up, check its
   specification requirements, run its generator pipeline
   (in1/in2/prep/op/wout/cout), free the temporaries, and advance PC.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Even/odd register-pair insns require an even r1/r2.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* f128 operands need a register with a valid +2 pairing.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers; the g_* flags mark
       TCG globals that must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
4579
/* Translate a block of guest code starting at tb->pc into TCG ops.
   With search_pc != 0, additionally record per-op metadata
   (gen_opc_pc / gen_opc_cc_op / gen_opc_instr_start / gen_opc_icount)
   so that guest state can later be reconstructed from a host PC
   (see restore_state_to_opc).  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode: addresses use only the low 31 bits of the PSW.  */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    /* CC state is unknown at TB entry; individual insns refine it.  */
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Translation must not cross a guest page boundary.  */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record the mapping from the current op index to guest state,
               zero-filling instr_start for ops emitted since the last insn.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        /* With CF_LAST_IO, the final insn of the TB may perform I/O and
           must run with the io flag set.  */
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        status = NO_EXIT;
        /* Stop before translating an insn with a breakpoint on it; the
           debug exception is raised in the TB epilogue below.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The TB already ended with a goto_tb/exception; nothing to emit.  */
        break;
    case EXIT_PC_STALE:
        /* psw.addr in env is behind dc.pc; write it back first.  */
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill metadata for any trailing ops after the last insn.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4713
4714 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4715 {
4716 gen_intermediate_code_internal(env, tb, 0);
4717 }
4718
4719 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4720 {
4721 gen_intermediate_code_internal(env, tb, 1);
4722 }
4723
4724 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4725 {
4726 int cc_op;
4727 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4728 cc_op = gen_opc_cc_op[pc_pos];
4729 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4730 env->cc_op = cc_op;
4731 }
4732 }