/*
 * target-s390x/translate.c (mirror of qemu.git)
 * Snapshot at commit: "target-s390: Implement BRANCH ON INDEX"
 */
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/* Per-translation-block decoder state. */
struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;      /* insn currently being translated */
    DisasFields *fields;        /* decoded operand fields of insn */
    uint64_t pc, next_pc;       /* start of current and of next insn */
    enum cc_op cc_op;           /* how the CC is currently computed */
    bool singlestep_enabled;
    int is_jmp;
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;                 /* operands live in u.s64, else u.s32 */
    bool g1;                    /* operand a is a global; do not free */
    bool g2;                    /* operand b is a global; do not free */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

/* Extra is_jmp value: TB ended via a generated exception. */
#define DISAS_EXCP 4

static void gen_op_calc_cc(DisasContext *s);

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches translated inline vs via helper. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
81
82 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
83 {
84 if (!(s->tb->flags & FLAG_MASK_64)) {
85 if (s->tb->flags & FLAG_MASK_32) {
86 return pc | 0x80000000;
87 }
88 }
89 return pc;
90 }
91
92 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
93 int flags)
94 {
95 int i;
96
97 if (env->cc_op > 3) {
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
99 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
100 } else {
101 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
102 env->psw.mask, env->psw.addr, env->cc_op);
103 }
104
105 for (i = 0; i < 16; i++) {
106 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
107 if ((i % 4) == 3) {
108 cpu_fprintf(f, "\n");
109 } else {
110 cpu_fprintf(f, " ");
111 }
112 }
113
114 for (i = 0; i < 16; i++) {
115 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
116 if ((i % 4) == 3) {
117 cpu_fprintf(f, "\n");
118 } else {
119 cpu_fprintf(f, " ");
120 }
121 }
122
123 #ifndef CONFIG_USER_ONLY
124 for (i = 0; i < 16; i++) {
125 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
126 if ((i % 4) == 3) {
127 cpu_fprintf(f, "\n");
128 } else {
129 cpu_fprintf(f, " ");
130 }
131 }
132 #endif
133
134 #ifdef DEBUG_INLINE_BRANCHES
135 for (i = 0; i < CC_OP_MAX; i++) {
136 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
137 inline_branch_miss[i], inline_branch_hit[i]);
138 }
139 #endif
140
141 cpu_fprintf(f, "\n");
142 }
143
144 static TCGv_i64 psw_addr;
145 static TCGv_i64 psw_mask;
146
147 static TCGv_i32 cc_op;
148 static TCGv_i64 cc_src;
149 static TCGv_i64 cc_dst;
150 static TCGv_i64 cc_vr;
151
152 static char cpu_reg_names[32][4];
153 static TCGv_i64 regs[16];
154 static TCGv_i64 fregs[16];
155
156 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
157
/* Create the fixed TCG globals that mirror the architected CPU state
   (PSW, CC computation inputs, GPRs and FPRs).  Called once at startup,
   before any translation occurs. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* cpu_reg_names holds r0..r15 in slots 0-15 and f0..f15 in 16-31;
       the name storage must stay live for the lifetime of the globals. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
197
/* Return a new temporary holding a copy of general register REG. */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a new temporary holding the 32-bit value from the high half
   of float register REG (where short BFP operands live). */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}
211
/* Store V into general register REG (all 64 bits). */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into float register REG (all 64 bits). */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

/* 32 bit register writes keep the upper half */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of register REG,
   keeping the low half intact (high-word facility semantics). */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store a short (32-bit) float into the high half of FPR REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low 64 bits of a 128-bit helper result, which helpers
   leave in env->retxl. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
242
/* Write the current translation PC back to the architected PSW. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Before an operation that may fault (softmmu only), make sure the
   PSW address and CC are up to date so the fault sees correct state. */
static void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}
256
/* Fetch a 2-byte instruction unit at PC, zero-extended. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch a 4-byte instruction unit at PC, zero-extended. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch a 6-byte instruction at PC into the low 48 bits. */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
271
/* Map the PSW address-space-control bits (cached in tb->flags) to the
   softmmu MMU index used for data accesses. */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Access-register mode is not handled here. */
        tcg_abort();
        break;
    }
}
286
/* Emit a call to the exception helper with exception number EXCP;
   the helper longjmps out, so no code after it is reached at runtime. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
293
/* Raise program interrupt CODE for the current instruction: record the
   interrupt code and instruction length in env, advance the PSW past
   the insn, flush the CC, and generate the exception.  Ends the TB. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length in bytes, needed to compute the ILC. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    gen_op_calc_cc(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);

    /* End TB here.  */
    s->is_jmp = DISAS_EXCP;
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    /* NOTE(review): architecturally an unknown opcode raises an
       operation exception; this raises PGM_SPECIFICATION — confirm
       this is the intended simplification.  */
    gen_program_exception(s, PGM_SPECIFICATION);
}
325
/* If we are currently in problem (user) state, raise a privileged-
   operation exception.  Note this only emits the exception; callers
   still generate the insn body, which is skipped at runtime. */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
332
/* Compute the effective address d2(x2,b2) into a new temporary:
   displacement plus optional index and base registers, truncated to
   31 bits when not in 64-bit addressing mode. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        /* Neither index nor base: address is the displacement alone. */
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
370
/* Set the CC to the constant value VAL (0..3). */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}

/* Record a deferred one-operand CC computation OP with operand DST. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Record a deferred two-operand CC computation OP. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

/* Record a deferred three-operand CC computation OP. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* CC from zero/nonzero of a 64-bit value. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC from the sign/zero class of a 32-bit float. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* CC from the sign/zero class of a 64-bit float. */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* CC from the sign/zero class of a 128-bit float (two halves). */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
421
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}

/* Spill the translation-time cc_op into the cc_op global, unless it is
   STATIC/DYNAMIC and the global is therefore already correct. */
static void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Synchronize the CC state before leaving the TB. */
static void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}
442
/* calculates cc into cc_op */
/* Materialize the deferred CC computation: depending on how many
   operands s->cc_op requires, call the calc_cc helper with the saved
   cc_src/cc_dst/cc_vr, leaving the 2-bit CC value in the cc_op global
   and marking the translation state as CC_OP_STATIC. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
517
/* True if a direct jump to DEST may use goto_tb chaining: the target
   must lie on one of the (up to two) pages the TB spans, and neither
   single-stepping nor an I/O-ending TB may be in effect. */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}
526
/* Debug statistics: count a branch that had to go through the helper. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Debug statistics: count a branch translated to an inline compare. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
540
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.
   Indexed by the 4-bit branch mask (bit 8 = CC0/EQ, 4 = CC1/LT,
   2 = CC2/GT, 1 = CC3); TCG_COND_NEVER marks masks we punt on. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
567
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the pending
   cc_op admits it, the branch condition is folded into a direct
   compare of cc_src/cc_dst; otherwise the CC is materialized and the
   mask is tested against the 2-bit CC value. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial masks: branch always / branch never.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK sets CC0 for all-zero; only the all-zero /
           not-all-zero masks map to a simple compare.  */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (operand & mask) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The CC value itself is in the cc_op global; translate the
           4-bit mask into a test against it.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
790
791 static void free_compare(DisasCompare *c)
792 {
793 if (!c->g1) {
794 if (c->is_64) {
795 tcg_temp_free_i64(c->u.s64.a);
796 } else {
797 tcg_temp_free_i32(c->u.s32.a);
798 }
799 }
800 if (!c->g2) {
801 if (c->is_64) {
802 tcg_temp_free_i64(c->u.s64.b);
803 } else {
804 tcg_temp_free_i32(c->u.s32.b);
805 }
806 }
807 }
808
/* ====================================================================== */
/* Define the insn format enumeration.  Each FMT_xxx constant is
   generated from insn-format.def by expanding the F0..F5 macros. */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact slots; fields sharing a value never co-occur in one format. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

/* The decoded operand fields of one instruction. */
struct DisasFields {
    unsigned op:8;              /* primary opcode byte */
    unsigned op2:8;             /* secondary opcode byte, if any */
    unsigned presentC:16;       /* bitmap over DisasFieldIndexC slots */
    unsigned int presentO;      /* bitmap over DisasFieldIndexO values */
    int c[NUM_C_FIELD];         /* field values, indexed compactly */
};
896
/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* True if original-index field C was decoded for this insn. */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch the value of field O from its compact slot C; the field must
   have been decoded (asserted). */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
912
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field within the insn */
    unsigned int size:8;        /* width in bits */
    unsigned int type:2;        /* extraction kind used by the decoder */
    unsigned int indexC:6;      /* compact storage slot */
    enum DisasFieldIndexO indexO:8; /* original field index */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

/* Helper macros naming the standard field placements. */
#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

/* One DisasFormatInfo per format, generated from insn-format.def. */
#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
966
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;   /* value is a global; do not free */
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;

/* Architectural facility an insn belongs to (for availability checks). */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;

/* Static description of one instruction: opcode, format, facility and
   the decomposed translation callbacks. */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:6;
    DisasFacility fac:6;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
1033
1034 /* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */
1036
/* Load the in2 operand for a shift instruction: either the immediate
   displacement, or the computed address d2(b2), masked to the valid
   shift-count range MASK (0x3f for 64-bit ops, 0x1f for 32-bit). */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
1050
/* Emit an unconditional direct branch to DEST: elide a branch-to-next,
   chain via goto_tb when permitted, otherwise just update the PSW. */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        gen_update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1067
/* Emit a conditional branch described by C.  IS_IMM selects a relative
   target (S->pc + 2*IMM halfwords) versus the register target CDEST.
   Chooses between two goto_tb exits, one goto_tb plus an indirect exit,
   or a movcond-selected PSW update, depending on what use_goto_tb
   allows.  Consumes (frees) C. */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit compare result so movcond_i64 can use it. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1192
1193 /* ====================================================================== */
1194 /* The operations. These perform the bulk of the work for any insn,
1195 usually after the operands have been loaded and output initialized. */
1196
/* LOAD POSITIVE (64-bit): out = |in2| via helper. */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value. */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (long BFP): clear the sign bit. */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (extended BFP): sign lives in the high doubleword. */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* ADD: out = in1 + in2; CC set by the cout callback. */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1227
/* ADD LOGICAL WITH CARRY: out = in1 + in2 + carry-in.  The incoming
   carry is recovered from the current condition code: the cc is fully
   materialized into cc_op, and bit 1 of the cc value (cc >> 1) supplies
   the carry to add.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Carry is bit 1 of the condition code.  */
    tcg_gen_shri_i64(cc, cc, 1);

    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
1244
/* ADD (short BFP) via helper; FP exceptions handled in the helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (long BFP) via helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (extended BFP): 128-bit operands are passed as high/low i64
   pairs; the low half of the result comes back via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* AND (integer): plain 64-bit bitwise and.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1269
/* AND IMMEDIATE (NIHH/NIHL/NILH/NILL family): insn->data packs the
   field width in bits 8+ and the bit offset in bits 0-7.  The immediate
   in in2 is shifted into place and all bits outside the field are set
   to 1 so that only the selected field of in1 is affected.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* We mutate in2 in place, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1286
/* BRANCH AND SAVE: store the link information for the next insn in r1,
   then branch to the address in in2.  R2 == 0 (in2 unused) means "save
   only, no branch".  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* BRANCH RELATIVE AND SAVE: save the link, then take a direct branch
   to pc + 2*i2 (halfword-scaled immediate).  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* BRANCH ON CONDITION: evaluate the mask m1 into a DisasCompare and
   hand off to the common conditional-branch expander.  The target is
   either the immediate i2 or the address computed into in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1314
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and branch
   if the result is non-zero.  The decrement is written back before the
   compare values are captured.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Only the low 32 bits of r1 are updated.  */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch while
   non-zero.  The compare operand aliases the global register (g1).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1357
/* BRANCH ON INDEX (32-bit, BXH/BXLE): add the increment in r3 to r1,
   then compare against the comparand in r3|1 (the odd register of the
   pair, or r3 itself if r3 is odd).  insn->data selects LE (BXLE) vs
   GT (BXH).  Note that the comparand is captured before r1 is stored,
   which matters when r1 == r3|1.  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    /* Capture the comparand before the store below can clobber it.  */
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): as above, full register width.
   When r1 == r3|1 the comparand must be copied to a temp before r1 is
   updated; otherwise the global register is used directly.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* The add below would clobber the comparand; snapshot it.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1409
/* COMPARE (short BFP): helper computes the cc directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (long BFP).  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (extended BFP): both 128-bit operands passed as i64 pairs.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* CONVERT TO FIXED (short BFP -> 32-bit int); m3 is the rounding mode.
   The cc is derived from the source value.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (long BFP -> 32-bit int).  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (extended BFP -> 32-bit int).  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (short BFP -> 64-bit int).  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (long BFP -> 64-bit int).  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (extended BFP -> 64-bit int).  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT FROM FIXED (64-bit int -> short BFP); no cc change.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM FIXED (64-bit int -> long BFP).  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM FIXED (64-bit int -> extended BFP); low doubleword of
   the 128-bit result retrieved via return_low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1509
/* CHECKSUM: the helper computes the checksum and the cc, and returns
   the number of bytes processed in LEN (with the accumulated checksum
   coming back through return_low128).  The r2/r2+1 address/length pair
   is then advanced/decremented by that count.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1526
/* COMPARE LOGICAL (storage-storage): for lengths of 1, 2, 4 or 8 bytes
   the comparison is inlined as two loads feeding the unsigned-compare
   cc computation; any other length falls back to the byte-loop helper.
   Note l1 is the encoded length, i.e. actual length minus one.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: do it in the helper.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1560
/* COMPARE LOGICAL LONG EXTENDED: fully helper-based; register numbers
   are passed so the helper can update the address/length pairs.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE LOGICAL CHARACTERS UNDER MASK: byte-wise compare of the
   bytes of r1 selected by m3 against successive storage bytes.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* COMPARE LOGICAL STRING: the ending byte is in regs[0]; the helper
   returns the updated first-operand address in in1 and the updated
   second-operand address via return_low128 into in2.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* COMPARE AND SWAP (32-bit): helper performs the atomic-on-this-cpu
   compare-and-swap of storage at in2 against in1, with r3 as the
   replacement value.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE AND SWAP (64-bit).  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
1612
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged).  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
1624
/* COMPARE DOUBLE AND SWAP (32-bit pair): assemble the even/odd r3
   register pair into a single 64-bit value and reuse the 64-bit
   compare-and-swap helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    /* in3 = r3 (high 32 bits) : r3+1 (low 32 bits).  */
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE DOUBLE AND SWAP (64-bit pair, 128-bit operand): entirely
   helper-based for now.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* CONVERT TO DECIMAL: convert the low 32 bits of in1 to packed decimal
   in the helper and store the 8-byte result at the address in in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1659
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): hypervisor call.  The function code comes
   from the displacement field; r1/r2 conventionally hold arguments
   and the result is written back to r2.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
#endif
1675
/* DIVIDE (signed 32-bit): helper returns the quotient directly and the
   remainder via return_low128.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (unsigned 32-bit).  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE (signed 64-bit).  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (unsigned, 128-bit dividend in out:out2).  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE (short BFP).  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (long BFP).  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (extended BFP): 128-bit operands as i64 pairs.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1722
/* EXTRACT ACCESS: copy access register r2 (32-bit) into r1.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT FPC: read the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
1735
/* EXECUTE: run the single instruction at the target address with its
   second byte OR'd with bits of r1.  Implemented entirely in a helper,
   which needs the current PSW address and cc materialized first.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    /* Pass the address of the next insn so EXECUTE's target can
       compute link information correctly.  */
    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
1761
/* FIND LEFTMOST ONE: R1 gets the count of leading zeros (64 when the
   input is zero); R1+1 gets the input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
1781
1782 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
1783 {
1784 int m3 = get_field(s->fields, m3);
1785 int pos, len, base = s->insn->data;
1786 TCGv_i64 tmp = tcg_temp_new_i64();
1787 uint64_t ccm;
1788
1789 switch (m3) {
1790 case 0xf:
1791 /* Effectively a 32-bit load. */
1792 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
1793 len = 32;
1794 goto one_insert;
1795
1796 case 0xc:
1797 case 0x6:
1798 case 0x3:
1799 /* Effectively a 16-bit load. */
1800 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
1801 len = 16;
1802 goto one_insert;
1803
1804 case 0x8:
1805 case 0x4:
1806 case 0x2:
1807 case 0x1:
1808 /* Effectively an 8-bit load. */
1809 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
1810 len = 8;
1811 goto one_insert;
1812
1813 one_insert:
1814 pos = base + ctz32(m3) * 8;
1815 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
1816 ccm = ((1ull << len) - 1) << pos;
1817 break;
1818
1819 default:
1820 /* This is going to be a sequence of loads and inserts. */
1821 pos = base + 32 - 8;
1822 ccm = 0;
1823 while (m3) {
1824 if (m3 & 0x8) {
1825 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
1826 tcg_gen_addi_i64(o->in2, o->in2, 1);
1827 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
1828 ccm |= 0xff << pos;
1829 }
1830 m3 = (m3 << 1) & 0xf;
1831 pos -= 8;
1832 }
1833 break;
1834 }
1835
1836 tcg_gen_movi_i64(tmp, ccm);
1837 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
1838 tcg_temp_free_i64(tmp);
1839 return NO_EXIT;
1840 }
1841
/* INSERT IMMEDIATE (IIHH/IIHL/IILH/IILL family): insn->data packs the
   destination bit offset (low byte) and field width (high bits); the
   immediate in in2 is deposited into that field of in1.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
1849
/* INSERT PROGRAM MASK: clear bits 24-31 of r1, then deposit the PSW
   program mask (extracted from psw_mask) into bits 24-27 and the
   materialized condition code into bits 28-31.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program-mask field out of psw_mask.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Insert the cc into bits 28-31.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1868
#ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged).  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED (privileged).  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
1884
/* LOAD LENGTHENED (short -> long BFP).  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED (long -> short BFP).  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED (extended -> long BFP).  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED (extended -> short BFP).  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD LENGTHENED (long -> extended BFP).  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LENGTHENED (short -> extended BFP).  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LOGICAL THIRTY ONE BITS: keep only bits 0-30 of in2.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
1928
/* Memory loads of various widths and extensions; in2 holds the
   computed effective address.  */

/* Load 8-bit, sign-extended.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 8-bit, zero-extended.  */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 16-bit, sign-extended.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 16-bit, zero-extended.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 32-bit, sign-extended.  */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 32-bit, zero-extended.  */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Load 64-bit.  */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
1970
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged): load control registers r1..r3
   from storage at in2.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit, privileged).  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD REAL ADDRESS (privileged): translate the virtual address in
   in2; helper sets the cc.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PSW (privileged): 8-byte ESA-format PSW from storage.  The
   helper replaces the PSW, so this ends the TB unconditionally.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED (privileged): 16-byte z-format PSW.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
2040
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from storage.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2051
/* LOAD MULTIPLE (32-bit): unrolled loads into the low halves of
   registers r1..r3, with register numbers wrapping mod 16.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        /* Advance the address to the next word.  */
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE HIGH: as op_lm32, but targets the high halves.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE (64-bit): full-register loads, 8-byte stride.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2114
/* Generic register move: steal in2 as the output to avoid a copy.
   Ownership (the "global" flag) moves along with the temp, and in2 is
   marked unused so the common cleanup code does not free it twice.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* Generic 128-bit move: steal both input halves as the output pair,
   transferring ownership as in op_mov2.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2135
/* MOVE (character): storage-to-storage copy of l1+1 bytes via helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: helper updates the r1/r1+1 and r2/r2+1 pairs and the cc.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2168
#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY (privileged): cross-space copy; the key/length
   operand comes from the register named by the l1 field.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY (privileged).  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
2190
/* MOVE PAGE: helper-based page copy; regs[0] carries the option bits.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: regs[0] holds the ending byte; the helper returns the
   updated destination address in in1 and the updated source address
   via return_low128 into in2.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2207
/* MULTIPLY (integer): low 64 bits of the product.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (128-bit product): high half in out, low half via
   return_low128.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP).  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short -> long BFP).  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP).  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (extended BFP): 128-bit operands as i64 pairs.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (long -> extended BFP).  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f(r3).  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (long BFP).  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (short BFP).  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (long BFP).  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2282
/* LOAD NEGATIVE (integer): -|in2| via helper.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD NEGATIVE (short BFP): force the sign bit on.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (long BFP).  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (extended BFP): set the sign in the high doubleword,
   copy the low doubleword.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2307
2308 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2309 {
2310 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2311 potential_page_fault(s);
2312 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2313 tcg_temp_free_i32(l);
2314 set_cc_static(s);
2315 return NO_EXIT;
2316 }
2317
2318 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2319 {
2320 tcg_gen_neg_i64(o->out, o->in2);
2321 return NO_EXIT;
2322 }
2323
2324 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2325 {
2326 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2327 return NO_EXIT;
2328 }
2329
/* Negate a 64-bit float by flipping its sign bit.  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
2335
/* Negate a 128-bit float: flip the sign bit in the high doubleword,
   copy the low doubleword through unchanged.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2342
/* OR (character), memory-to-memory over l1+1 bytes via helper; the
   helper returns the condition code, recorded with set_cc_static.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2352
/* Bitwise OR: out = in1 | in2.  CC, if any, is set by a cout helper.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2358
/* OR immediate into a sub-field of the register.  insn->data packs the
   field position: low byte = bit shift, high byte = field width.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is a private temp here (the immediate); shift it into place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2374
2375 #ifndef CONFIG_USER_ONLY
/* PURGE TLB (privileged): defer entirely to the helper.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2382 #endif
2383
/* Byte-swap the low 16 bits of in2 into out.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}
2389
/* Byte-swap the low 32 bits of in2 into out.  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}
2395
/* Byte-swap all 64 bits of in2 into out.  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2401
2402 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2403 {
2404 TCGv_i32 t1 = tcg_temp_new_i32();
2405 TCGv_i32 t2 = tcg_temp_new_i32();
2406 TCGv_i32 to = tcg_temp_new_i32();
2407 tcg_gen_trunc_i64_i32(t1, o->in1);
2408 tcg_gen_trunc_i64_i32(t2, o->in2);
2409 tcg_gen_rotl_i32(to, t1, t2);
2410 tcg_gen_extu_i32_i64(o->out, to);
2411 tcg_temp_free_i32(t1);
2412 tcg_temp_free_i32(t2);
2413 tcg_temp_free_i32(to);
2414 return NO_EXIT;
2415 }
2416
/* ROTATE LEFT single logical (64-bit): out = rotl(in1, in2).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2422
2423 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED (privileged): the helper returns the CC.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2431
/* SET ADDRESS SPACE CONTROL (FAST).
   NOTE(review): SAC/SACF are not unconditionally privileged in the
   architecture (some codes are problem-state); confirm whether the
   blanket check_privileged here is intentional or belongs in the helper.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
2439 #endif
2440
/* SET ACCESS register: store the low 32 bits of in2 into aregs[r1].  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
2447
/* Subtract, 32-bit FP: out = helper_seb(in1, in2).  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2453
/* Subtract, 64-bit FP: out = helper_sdb(in1, in2).  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2459
/* Subtract, 128-bit FP.  The first 128-bit operand arrives in out/out2
   (see the x1 prep/in helpers); the helper returns the low half via
   the return_low128 convention.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2466
/* Square root, 32-bit FP: out = helper_sqeb(in2).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2472
/* Square root, 64-bit FP: out = helper_sqdb(in2).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2478
/* Square root, 128-bit FP: operand in in1/in2, low half of the result
   returned via the return_low128 convention.  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2485
2486 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (privileged): the helper returns the condition code.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}
2495
2496 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2497 {
2498 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2499 check_privileged(s);
2500 potential_page_fault(s);
2501 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2502 tcg_temp_free_i32(r1);
2503 return NO_EXIT;
2504 }
2505 #endif
2506
/* SHIFT LEFT SINGLE (arithmetic).  insn->data is 31 or 63 and selects
   both the sign-bit position and the 32- vs 64-bit CC computation.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    /* CC is computed from the pre-shift value and the shift count.  */
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
2520
/* Shift left logical: out = in1 << in2.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2526
/* Shift right arithmetic: out = in1 >> in2 (sign-propagating).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2532
/* Shift right logical: out = in1 >> in2 (zero-filling).  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2538
/* SET FPC: load the floating-point control register from in2 via helper.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
2544
2545 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS (privileged): deposit bits 4-7 of the
   second-operand address into the PSW key field.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}
2553
/* SET STORAGE KEY EXTENDED (privileged): defer to the helper.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2560
/* SET SYSTEM MASK (privileged): replace the top byte of the PSW mask.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
2567
/* STORE CPU ADDRESS (privileged): reads cpu_num from env into out.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number. In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
2577
/* STORE CLOCK: the helper yields the TOD value; CC forced to 0.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
2585
2586 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
2587 {
2588 TCGv_i64 c1 = tcg_temp_new_i64();
2589 TCGv_i64 c2 = tcg_temp_new_i64();
2590 gen_helper_stck(c1, cpu_env);
2591 /* Shift the 64-bit value into its place as a zero-extended
2592 104-bit value. Note that "bit positions 64-103 are always
2593 non-zero so that they compare differently to STCK"; we set
2594 the least significant bit to 1. */
2595 tcg_gen_shli_i64(c2, c1, 56);
2596 tcg_gen_shri_i64(c1, c1, 8);
2597 tcg_gen_ori_i64(c2, c2, 0x10000);
2598 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
2599 tcg_gen_addi_i64(o->in2, o->in2, 8);
2600 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
2601 tcg_temp_free_i64(c1);
2602 tcg_temp_free_i64(c2);
2603 /* ??? We don't implement clock states. */
2604 gen_op_movi_cc(s, 0);
2605 return NO_EXIT;
2606 }
2607
/* SET CLOCK COMPARATOR (privileged): defer to the helper.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}
2614
/* STORE CLOCK COMPARATOR (privileged): the helper yields the value.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
2621
/* STORE CONTROL (64-bit, privileged): store control regs r1..r3 at in2.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2633
/* STORE CONTROL (32-bit, privileged): store control regs r1..r3 at in2.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2645
/* STORE CPU ID (privileged).
   NOTE(review): this reuses cpu_num as the whole CPU id word, same as
   op_stap; the architected CPU-ID layout is richer — verify.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
2652
/* SET CPU TIMER (privileged): defer to the helper.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
2659
/* STORE FACILITY LIST (privileged): store a fixed facility word at the
   architected low-core location 200.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement. Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}
2673
/* STORE CPU TIMER (privileged): the helper yields the value.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
2680
/* STORE SYSTEM INFORMATION (privileged): function code comes from
   r0/r1 implicitly; the helper returns the condition code.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
2689
/* SET PREFIX (privileged): defer to the helper.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
2696
/* Channel-subsystem instructions (privileged): unimplemented, so
   always report CC 3 ("not operational").  */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational. */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}
2704
/* STORE PREFIX (privileged): read psa and mask to the prefix bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
2712
/* STORE THEN AND/OR SYSTEM MASK (privileged).  Opcode 0xac is the AND
   form (STNSM); otherwise the OR form (STOSM).  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
2736
/* STORE USING REAL ADDRESS (privileged): defer to the helper.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
2744 #endif
2745
/* Store the low byte of in1 at address in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2751
/* Store the low halfword of in1 at address in2.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2757
/* Store the low word of in1 at address in2.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2763
/* Store the full doubleword in1 at address in2.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2769
/* STORE ACCESS MULTIPLE: store access regs r1..r3 via helper.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2780
/* STORE CHARACTERS UNDER MASK.  m3 selects which bytes of the (32-bit,
   positioned at insn->data within the register) field are stored, most
   significant bit first.  Contiguous masks collapse into one store.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte within in1.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
2829
/* STORE MULTIPLE: store registers r1..r3 (wrapping mod 16) at in2.
   insn->data selects the element size (4 for STM, 8 for STMG).  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
2853
2854 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
2855 {
2856 int r1 = get_field(s->fields, r1);
2857 int r3 = get_field(s->fields, r3);
2858 TCGv_i64 t = tcg_temp_new_i64();
2859 TCGv_i64 t4 = tcg_const_i64(4);
2860 TCGv_i64 t32 = tcg_const_i64(32);
2861
2862 while (1) {
2863 tcg_gen_shl_i64(t, regs[r1], t32);
2864 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
2865 if (r1 == r3) {
2866 break;
2867 }
2868 tcg_gen_add_i64(o->in2, o->in2, t4);
2869 r1 = (r1 + 1) & 15;
2870 }
2871
2872 tcg_temp_free_i64(t);
2873 tcg_temp_free_i64(t4);
2874 tcg_temp_free_i64(t32);
2875 return NO_EXIT;
2876 }
2877
/* SEARCH STRING: helper scans for the byte in r0; it returns the CC
   and updated addresses (second one via the low-128 convention).  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2886
/* Subtract: out = in1 - in2.  CC, if any, is set by a cout helper.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2892
/* SUBTRACT WITH BORROW: out = in1 + ~in2 + borrow, where the borrow
   is recovered from bit 1 of the current condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    /* in2 is a private temp; complement it in place.  */
    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Bit 1 of the CC carries the borrow indication.  */
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
2910
2911 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
2912 {
2913 TCGv_i32 t;
2914
2915 update_psw_addr(s);
2916 gen_op_calc_cc(s);
2917
2918 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
2919 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
2920 tcg_temp_free_i32(t);
2921
2922 t = tcg_const_i32(s->next_pc - s->pc);
2923 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
2924 tcg_temp_free_i32(t);
2925
2926 gen_exception(EXCP_SVC);
2927 return EXIT_NORETURN;
2928 }
2929
/* TEST DATA CLASS, 32-bit FP: the helper returns the condition code.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2936
/* TEST DATA CLASS, 64-bit FP: the helper returns the condition code.  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2943
/* TEST DATA CLASS, 128-bit FP: value arrives in out/out2 (x1 prep).  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2950
2951 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: the helper returns the condition code.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2959 #endif
2960
/* TRANSLATE: translate l1+1 bytes at addr1 through the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2970
/* UNPACK: defer the decimal unpack to the helper.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
2979
/* EXCLUSIVE OR (character), memory-to-memory over l1+1 bytes via
   helper; the helper returns the condition code.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2989
/* Bitwise XOR: out = in1 ^ in2.  CC, if any, is set by a cout helper.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2995
/* XOR immediate into a sub-field of the register.  insn->data packs
   the field position exactly as in op_ori: low byte = shift, high
   byte = width.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is a private temp here (the immediate); shift it into place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3011
/* Produce the constant zero as the single output.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}
3017
/* Produce zero in both outputs.  out2 aliases out; g_out2 is set so
   the shared temp is not freed twice by the generic cleanup.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3025
3026 /* ====================================================================== */
3027 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3028 the original inputs), update the various cc data structures in order to
3029 be able to compute the new condition code. */
3030
/* CC from a 32-bit absolute-value result.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}
3035
/* CC from a 64-bit absolute-value result.  */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}
3040
/* CC from a signed 32-bit add: needs both inputs and the result.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}
3045
/* CC from a signed 64-bit add: needs both inputs and the result.  */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}
3050
/* CC from an unsigned (logical) 32-bit add.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}
3055
/* CC from an unsigned (logical) 64-bit add.  */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}
3060
/* CC from a 32-bit add-with-carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}
3065
/* CC from a 64-bit add-with-carry.  */
static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}
3070
/* CC from a signed 32-bit compare of the two inputs.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}
3075
/* CC from a signed 64-bit compare of the two inputs.  */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}
3080
/* CC from an unsigned 32-bit compare of the two inputs.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}
3085
/* CC from an unsigned 64-bit compare of the two inputs.  */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}
3090
/* CC from a 32-bit FP result (zero / negative / positive / NaN).  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}
3095
/* CC from a 64-bit FP result.  */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}
3100
/* CC from a 128-bit FP result held in the out/out2 pair.  */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}
3105
/* CC from a 32-bit negative-absolute result.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}
3110
/* CC from a 64-bit negative-absolute result.  */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}
3115
/* CC from a 32-bit complement (load-complement) result.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}
3120
/* CC from a 64-bit complement (load-complement) result.  */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}
3125
/* CC = zero/non-zero of the low 32 bits of the result; the value is
   zero-extended into cc_dst first so the high half is ignored.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}
3131
/* CC = zero/non-zero of the full 64-bit result.  */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}
3136
/* CC from the sign of a 32-bit result (compare against zero).  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}
3141
/* CC from the sign of a 64-bit result (compare against zero).  */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}
3146
/* CC from a signed 32-bit subtract.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}
3151
/* CC from a signed 64-bit subtract.  */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}
3156
/* CC from an unsigned (logical) 32-bit subtract.  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}
3161
/* CC from an unsigned (logical) 64-bit subtract.  */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}
3166
/* CC from a 32-bit subtract-with-borrow.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}
3171
/* CC from a 64-bit subtract-with-borrow.  */
static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}
3176
/* CC from TEST UNDER MASK, 32-bit: value and mask as inputs.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}
3181
/* CC from TEST UNDER MASK, 64-bit: value and mask as inputs.  */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3186
3187 /* ====================================================================== */
3188 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3189 with the TCG register to which we will write. Used in combination with
3190 the "wout" generators, in some cases we need a new temporary, and in
3191 some cases we can write to a TCG global. */
3192
/* Allocate a fresh temp for the single output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
3197
/* Allocate fresh temps for an output pair.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
3203
/* Output directly into the r1 global; flag it so it isn't freed.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
3209
/* Output into the even/odd register pair r1, r1+1 (globals).  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}
3218
/* Output directly into the f1 FP-register global.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
3224
/* Output into the 128-bit FP register pair f[r1], f[r1+2] (globals).  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3233
3234 /* ====================================================================== */
3235 /* The "Write OUTput" generators. These generally perform some non-trivial
3236 copy of data to TCG globals, or to main memory. The trivial cases are
3237 generally handled by having a "prep" generator install the TCG global
3238 as the destination of the operation. */
3239
/* Write the full 64-bit result to register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
3244
/* Write only the low byte of the result into r1, preserving the rest.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
3250
/* Write only the low halfword of the result into r1, preserving the rest.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
3256
/* Write the low 32 bits of the result into r1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
3261
/* Write a 32-bit pair: out to r1, out2 to r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}
3269
/* Split a 64-bit result across the even/odd pair: low half to r1+1,
   high half to r1.  Note this shifts o->out in place.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
3278
/* Write a 32-bit FP result to the short half of f[r1].  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
3283
/* Write a 64-bit FP result to f[r1].  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
3288
3289 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3290 {
3291 /* ??? Specification exception: r1 must be < 14. */
3292 int f1 = get_field(s->fields, r1);
3293 store_freg(f1, o->out);
3294 store_freg((f1 + 2) & 15, o->out2);
3295 }
3296
/* Conditionally write the 32-bit result: skip when r1 == r2 (the
   operation was a pure register move onto itself).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
3303
/* Conditionally write the 32-bit FP result: skip when r1 == r2.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
3310
/* Store the low byte of the result at the first-operand address.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
3315
/* Store the low halfword of the result at the first-operand address.  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
3320
/* Store the low word of the result at the first-operand address.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
3325
/* Store the doubleword result at the first-operand address.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
3330
/* Store the low word of the result at the second-operand address (in2).  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3335
3336 /* ====================================================================== */
3337 /* The "INput 1" generators. These load the first operand to an insn. */
3338
/* in1 = a fresh copy of register r1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
3343
/* in1 = the r1 global itself (no copy); flag it so it isn't freed.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
3349
/* in1 = sign-extended low 32 bits of r1.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
3355
/* in1 = zero-extended low 32 bits of r1.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
3361
/* in1 = high 32 bits of r1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
3367
/* in1 = a copy of the odd register of the r1 pair.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}
3374
/* in1 = sign-extended low 32 bits of the odd register of the pair.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}
3382
/* in1 = zero-extended low 32 bits of the odd register of the pair.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}
3390
3391 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3392 {
3393 /* ??? Specification exception: r1 must be even. */
3394 int r1 = get_field(f, r1);
3395 o->in1 = tcg_temp_new_i64();
3396 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3397 }
3398
/* in1 = a fresh copy of register r2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
3403
/* in1 = a fresh copy of register r3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
3408
/* in1 = the r3 global itself (no copy); flag it so it isn't freed.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
3414
/* in1 = sign-extended low 32 bits of r3.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
3420
/* in1 = zero-extended low 32 bits of r3.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
3426
/* in1 = the 32-bit FP half of f[r1], as a fresh temp.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
3431
/* in1 = the f[r1] global itself (no copy); flag it so it isn't freed.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
3437
/* 128-bit FP first operand: the pair f[r1], f[r1+2] is placed in
   out/out2 — NOT in in1/in2 — matching how the f128 helper ops consume
   their first operand (see op_sxb, op_tcxb).
   NOTE(review): confirm against the insn table that nothing pairs this
   with a prep hook that also writes o->out.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3446
3447 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
3448 {
3449 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
3450 }
3451
3452 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
3453 {
3454 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3455 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
3456 }
3457
3458 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3459 {
3460 in1_la1(s, f, o);
3461 o->in1 = tcg_temp_new_i64();
3462 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
3463 }
3464
3465 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3466 {
3467 in1_la1(s, f, o);
3468 o->in1 = tcg_temp_new_i64();
3469 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
3470 }
3471
3472 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3473 {
3474 in1_la1(s, f, o);
3475 o->in1 = tcg_temp_new_i64();
3476 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
3477 }
3478
3479 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3480 {
3481 in1_la1(s, f, o);
3482 o->in1 = tcg_temp_new_i64();
3483 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
3484 }
3485
3486 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3487 {
3488 in1_la1(s, f, o);
3489 o->in1 = tcg_temp_new_i64();
3490 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
3491 }
3492
3493 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3494 {
3495 in1_la1(s, f, o);
3496 o->in1 = tcg_temp_new_i64();
3497 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
3498 }
3499
3500 /* ====================================================================== */
3501 /* The "INput 2" generators. These load the second operand to an insn. */
3502
3503 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3504 {
3505 o->in2 = regs[get_field(f, r1)];
3506 o->g_in2 = true;
3507 }
3508
3509 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3510 {
3511 o->in2 = tcg_temp_new_i64();
3512 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
3513 }
3514
3515 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3516 {
3517 o->in2 = tcg_temp_new_i64();
3518 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
3519 }
3520
3521 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3522 {
3523 o->in2 = load_reg(get_field(f, r2));
3524 }
3525
3526 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3527 {
3528 o->in2 = regs[get_field(f, r2)];
3529 o->g_in2 = true;
3530 }
3531
3532 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
3533 {
3534 int r2 = get_field(f, r2);
3535 if (r2 != 0) {
3536 o->in2 = load_reg(r2);
3537 }
3538 }
3539
3540 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
3541 {
3542 o->in2 = tcg_temp_new_i64();
3543 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
3544 }
3545
3546 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3547 {
3548 o->in2 = tcg_temp_new_i64();
3549 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
3550 }
3551
3552 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3553 {
3554 o->in2 = tcg_temp_new_i64();
3555 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
3556 }
3557
3558 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3559 {
3560 o->in2 = tcg_temp_new_i64();
3561 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
3562 }
3563
3564 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3565 {
3566 o->in2 = load_reg(get_field(f, r3));
3567 }
3568
3569 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3570 {
3571 o->in2 = tcg_temp_new_i64();
3572 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
3573 }
3574
3575 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3576 {
3577 o->in2 = tcg_temp_new_i64();
3578 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
3579 }
3580
3581 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
3582 {
3583 o->in2 = load_freg32_i64(get_field(f, r2));
3584 }
3585
3586 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3587 {
3588 o->in2 = fregs[get_field(f, r2)];
3589 o->g_in2 = true;
3590 }
3591
3592 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3593 {
3594 /* ??? Specification exception: r1 must be < 14. */
3595 int r2 = get_field(f, r2);
3596 o->in1 = fregs[r2];
3597 o->in2 = fregs[(r2 + 2) & 15];
3598 o->g_in1 = o->g_in2 = true;
3599 }
3600
3601 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
3602 {
3603 o->in2 = get_address(s, 0, get_field(f, r2), 0);
3604 }
3605
3606 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
3607 {
3608 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3609 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
3610 }
3611
3612 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
3613 {
3614 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
3615 }
3616
3617 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
3618 {
3619 help_l2_shift(s, f, o, 31);
3620 }
3621
3622 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
3623 {
3624 help_l2_shift(s, f, o, 63);
3625 }
3626
3627 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3628 {
3629 in2_a2(s, f, o);
3630 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
3631 }
3632
3633 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3634 {
3635 in2_a2(s, f, o);
3636 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
3637 }
3638
3639 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3640 {
3641 in2_a2(s, f, o);
3642 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
3643 }
3644
3645 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3646 {
3647 in2_a2(s, f, o);
3648 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
3649 }
3650
3651 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3652 {
3653 in2_a2(s, f, o);
3654 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
3655 }
3656
3657 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
3658 {
3659 in2_a2(s, f, o);
3660 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
3661 }
3662
3663 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3664 {
3665 in2_ri2(s, f, o);
3666 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
3667 }
3668
3669 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3670 {
3671 in2_ri2(s, f, o);
3672 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
3673 }
3674
3675 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3676 {
3677 in2_ri2(s, f, o);
3678 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
3679 }
3680
3681 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
3682 {
3683 in2_ri2(s, f, o);
3684 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
3685 }
3686
3687 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
3688 {
3689 o->in2 = tcg_const_i64(get_field(f, i2));
3690 }
3691
3692 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3693 {
3694 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
3695 }
3696
3697 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3698 {
3699 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
3700 }
3701
3702 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3703 {
3704 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
3705 }
3706
3707 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3708 {
3709 uint64_t i2 = (uint16_t)get_field(f, i2);
3710 o->in2 = tcg_const_i64(i2 << s->insn->data);
3711 }
3712
3713 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3714 {
3715 uint64_t i2 = (uint32_t)get_field(f, i2);
3716 o->in2 = tcg_const_i64(i2 << s->insn->data);
3717 }
3718
3719 /* ====================================================================== */
3720
3721 /* Find opc within the table of insns. This is formulated as a switch
3722 statement so that (1) we get compile-time notice of cut-paste errors
3723 for duplicated opcodes, and (2) the compiler generates the binary
3724 search tree, rather than us having to post-process the table. */
3725
/* C() entries have no per-insn data; expand to D() with data = 0.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: one enumerator per instruction,
   giving each a stable index into insn_info[] below.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: build the DisasInsn descriptor table, wiring each
   instruction to its in1/in2/prep/wout/cout/op helper functions.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: a switch mapping (major << 8 | minor) opcode to the
   table entry.  The compiler turns this into a binary search and
   diagnoses duplicated opcodes at build time.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
3777
3778 /* Extract a field from the insn. The INSN should be left-aligned in
3779 the uint64_t so that we can more easily utilize the big-bit-endian
3780 definitions we extract from the Principals of Operation. */
3781
/* Extract one operand field described by F from the left-aligned INSN
   word and store it into the compressed field array of O.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized descriptor means the format has no such field.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Sign-extend via xor/subtract of the sign-bit mask.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* Reassemble: dh (high 8 bits, signed) above dl (low 12 bits).  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
3817
3818 /* Lookup the insn at the current PC, extracting the operands into O and
3819 returning the info struct for the insn. Returns NULL for invalid insn. */
3820
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for an invalid
   or unimplemented opcode.  Also sets s->next_pc from the insn length.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first halfword's major opcode determines the insn length.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the 64-bit word so field offsets from
       the Principles of Operation can be used directly.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode. Which we can't do without locating the
       secondary opcode. Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
3906
/* Decode and translate a single instruction at s->pc, driving the
   per-insn helper pipeline (in1, in2, prep, op, wout, cout) and then
   releasing any TCG temporaries the helpers created.  Returns the
   exit status reported by the op helper, or EXIT_NORETURN for an
   illegal/unimplemented opcode.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional (NULL when the
       insn-data table entry used 0 for that slot).  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       values that alias global registers and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
3976
/* Translate a basic block starting at tb->pc into TCG ops, stopping at
   a control-flow change, a page boundary, the op buffer limit, or the
   insn-count limit.  When SEARCH_PC is set, also record per-op PC and
   cc_op metadata so restore_state_to_opc() can recover guest state.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record the PC/cc_op for the first op of this insn; pad
               any intervening op slots with instr_start = 0.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* Stop before translating an insn that has a breakpoint.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue appropriate to how translation ended.  */
    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the metadata slots after the last insn.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4115
4116 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4117 {
4118 gen_intermediate_code_internal(env, tb, 0);
4119 }
4120
4121 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4122 {
4123 gen_intermediate_code_internal(env, tb, 1);
4124 }
4125
4126 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4127 {
4128 int cc_op;
4129 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4130 cc_op = gen_opc_cc_op[pc_pos];
4131 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4132 env->cc_op = cc_op;
4133 }
4134 }