]> git.proxmox.com Git - qemu.git/blob - target-s390x/translate.c
target-s390: Optimize ADDC/SUBB
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
51 struct DisasContext {
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
54 DisasFields *fields;
55 uint64_t pc, next_pc;
56 enum cc_op cc_op;
57 bool singlestep_enabled;
58 };
59
60 /* Information carried about a condition to be evaluated. */
61 typedef struct {
62 TCGCond cond:8;
63 bool is_64;
64 bool g1;
65 bool g2;
66 union {
67 struct { TCGv_i64 a, b; } s64;
68 struct { TCGv_i32 a, b; } s32;
69 } u;
70 } DisasCompare;
71
72 #define DISAS_EXCP 4
73
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
77 #endif
78
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
80 {
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
84 }
85 }
86 return pc;
87 }
88
89 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
90 int flags)
91 {
92 int i;
93
94 if (env->cc_op > 3) {
95 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
96 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
97 } else {
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
99 env->psw.mask, env->psw.addr, env->cc_op);
100 }
101
102 for (i = 0; i < 16; i++) {
103 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
104 if ((i % 4) == 3) {
105 cpu_fprintf(f, "\n");
106 } else {
107 cpu_fprintf(f, " ");
108 }
109 }
110
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
113 if ((i % 4) == 3) {
114 cpu_fprintf(f, "\n");
115 } else {
116 cpu_fprintf(f, " ");
117 }
118 }
119
120 #ifndef CONFIG_USER_ONLY
121 for (i = 0; i < 16; i++) {
122 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
123 if ((i % 4) == 3) {
124 cpu_fprintf(f, "\n");
125 } else {
126 cpu_fprintf(f, " ");
127 }
128 }
129 #endif
130
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i = 0; i < CC_OP_MAX; i++) {
133 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
134 inline_branch_miss[i], inline_branch_hit[i]);
135 }
136 #endif
137
138 cpu_fprintf(f, "\n");
139 }
140
141 static TCGv_i64 psw_addr;
142 static TCGv_i64 psw_mask;
143
144 static TCGv_i32 cc_op;
145 static TCGv_i64 cc_src;
146 static TCGv_i64 cc_dst;
147 static TCGv_i64 cc_vr;
148
149 static char cpu_reg_names[32][4];
150 static TCGv_i64 regs[16];
151 static TCGv_i64 fregs[16];
152
153 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
154
/* Create the TCG globals (PSW, cc state, general and FP registers)
   used by the translator, and register the helper functions.
   Called once at CPU initialization.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* Names live in cpu_reg_names: slots 0-15 are "r0".."r15",
       slots 16-31 are "f0".."f15".  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
194
195 static TCGv_i64 load_reg(int reg)
196 {
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
199 return r;
200 }
201
202 static TCGv_i64 load_freg32_i64(int reg)
203 {
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
206 return r;
207 }
208
/* Store V into all 64 bits of general register REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
213
/* Store V into all 64 bits of floating-point register REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
218
/* Store the low 32 bits of V into the low half of register REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
224
/* Store the low 32 bits of V into the HIGH half of register REG;
   the low half is preserved.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
229
/* Store the low 32 bits of V into the high half of FP register REG;
   the low half is preserved.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
234
/* Load into DEST the low half of a 128-bit helper result, which the
   helper left in env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
239
/* Synchronize the architectural PSW address with the translator's
   current PC.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
245
/* Write the translator's tracked cc_op out to env->cc_op.  Nothing to
   do for DYNAMIC (env already current) or STATIC (cc value itself is
   already in env->cc_op).  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
252
/* Before an operation that may fault, make sure the PSW address and
   cc state in env are up to date so the exception sees them.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
258
/* Fetch 2 bytes of instruction text at PC, zero-extended.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
263
/* Fetch 4 bytes of instruction text at PC, zero-extended.  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
268
/* Fetch 6 bytes of instruction text at PC: the first halfword goes in
   bits 32-47, the remaining 4 bytes in bits 0-31.  */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
273
274 static int get_mem_index(DisasContext *s)
275 {
276 switch (s->tb->flags & FLAG_MASK_ASC) {
277 case PSW_ASC_PRIMARY >> 32:
278 return 0;
279 case PSW_ASC_SECONDARY >> 32:
280 return 1;
281 case PSW_ASC_HOME >> 32:
282 return 2;
283 default:
284 tcg_abort();
285 break;
286 }
287 }
288
289 static void gen_exception(int excp)
290 {
291 TCGv_i32 tmp = tcg_const_i32(excp);
292 gen_helper_exception(cpu_env, tmp);
293 tcg_temp_free_i32(tmp);
294 }
295
/* Raise a program exception with program-interruption CODE.  Records
   the code and instruction length in env, advances the PSW past the
   current instruction, flushes cc state, and emits the exception.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length, derived from the decode window.  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
319
/* Raise a specification exception for an illegal opcode.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}
324
/* Raise a privileged-operation exception if the TB was translated in
   problem state (PSW problem-state bit set in tb->flags).  */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
331
/* Compute the effective address base(X2) + base(B2) + D2 into a new
   temporary, honoring 24/31-bit addressing when the TB is not in
   64-bit mode.  X2 or B2 of 0 means "no register" per the ISA.
   Caller owns (and must free) the returned temporary.  */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
369
370 static inline bool live_cc_data(DisasContext *s)
371 {
372 return (s->cc_op != CC_OP_DYNAMIC
373 && s->cc_op != CC_OP_STATIC
374 && s->cc_op > 3);
375 }
376
/* Set the condition code to the constant VAL (0..3), discarding any
   stale cc data so TCG needn't keep it alive.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
386
/* Record a one-operand cc computation: OP with operand DST.  The
   unused cc_src/cc_vr are discarded.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
396
/* Record a two-operand cc computation: OP with operands SRC, DST.
   The unused cc_vr is discarded.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
407
/* Record a three-operand cc computation: OP with operands SRC, DST
   and result VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
416
/* Set cc from the zero/non-zero state of the 64-bit value VAL.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
421
/* Set cc from a 32-bit float result in VAL (CC_OP_NZ_F32).  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
426
/* Set cc from a 64-bit float result in VAL (CC_OP_NZ_F64).  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
431
/* Set cc from a 128-bit float result given as high/low halves VH, VL
   (CC_OP_NZ_F128).  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
436
/* CC value is in env->cc_op */
/* Mark the cc as already computed and stored in env->cc_op; any
   pending cc data becomes dead and is discarded.  */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
447
/* calculates cc into cc_op */
/* Materialize the condition code: emit a call to the calc_cc helper
   (or a simple move for constant cc ops) so that env->cc_op holds the
   final 0..3 value, then mark the cc as static.

   The first switch only prepares the helper arguments: a constant
   local_cc_op where the op number must be passed explicitly, and a
   zero "dummy" to fill unused argument slots (note the deliberate
   fallthrough from the default case into the 3-argument list).  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
555
/* True if we may chain to DEST with goto_tb: DEST must be on one of
   the (up to two) pages the TB spans, and neither single-stepping nor
   an I/O-last-insn TB may use direct chaining.  */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}
564
/* Debug statistic: count a branch that could NOT be inlined for the
   given cc_op.  No-op unless DEBUG_INLINE_BRANCHES is defined.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
571
/* Debug statistic: count a branch that WAS inlined for the given
   cc_op.  No-op unless DEBUG_INLINE_BRANCHES is defined.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
578
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   Indexed by the 4-bit branch mask; entries come in identical pairs
   because the low mask bit (the CC=3 case) is a don't-care here.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
591
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.
   Indexed by the 4-bit branch mask; the two low mask bits (CC=2/CC=3)
   are don't-cares, hence the runs of four identical entries.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
604
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.

   Two phases: first map (cc_op, mask) to a TCG condition, falling
   back to materializing the cc value (do_dynamic) when the mask has
   no direct equivalent; second, load the comparison operands.  C->g1
   and C->g2 mark operands that are TCG globals and thus must not be
   freed by free_compare().  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivially always/never taken: compare cc_op against itself.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already in env->cc_op; compare against the
           mask-specific constant / parity of cc_op.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
895
896 static void free_compare(DisasCompare *c)
897 {
898 if (!c->g1) {
899 if (c->is_64) {
900 tcg_temp_free_i64(c->u.s64.a);
901 } else {
902 tcg_temp_free_i32(c->u.s32.a);
903 }
904 }
905 if (!c->g2) {
906 if (c->is_64) {
907 tcg_temp_free_i64(c->u.s64.b);
908 } else {
909 tcg_temp_free_i32(c->u.s32.b);
910 }
911 }
912 }
913
914 /* ====================================================================== */
915 /* Define the insn format enumeration. */
916 #define F0(N) FMT_##N,
917 #define F1(N, X1) F0(N)
918 #define F2(N, X1, X2) F0(N)
919 #define F3(N, X1, X2, X3) F0(N)
920 #define F4(N, X1, X2, X3, X4) F0(N)
921 #define F5(N, X1, X2, X3, X4, X5) F0(N)
922
923 typedef enum {
924 #include "insn-format.def"
925 } DisasFormat;
926
927 #undef F0
928 #undef F1
929 #undef F2
930 #undef F3
931 #undef F4
932 #undef F5
933
934 /* Define a structure to hold the decoded fields. We'll store each inside
935 an array indexed by an enum. In order to conserve memory, we'll arrange
936 for fields that do not exist at the same time to overlap, thus the "C"
937 for compact. For checking purposes there is an "O" for original index
938 as well that will be applied to availability bitmaps. */
939
940 enum DisasFieldIndexO {
941 FLD_O_r1,
942 FLD_O_r2,
943 FLD_O_r3,
944 FLD_O_m1,
945 FLD_O_m3,
946 FLD_O_m4,
947 FLD_O_b1,
948 FLD_O_b2,
949 FLD_O_b4,
950 FLD_O_d1,
951 FLD_O_d2,
952 FLD_O_d4,
953 FLD_O_x2,
954 FLD_O_l1,
955 FLD_O_l2,
956 FLD_O_i1,
957 FLD_O_i2,
958 FLD_O_i3,
959 FLD_O_i4,
960 FLD_O_i5
961 };
962
963 enum DisasFieldIndexC {
964 FLD_C_r1 = 0,
965 FLD_C_m1 = 0,
966 FLD_C_b1 = 0,
967 FLD_C_i1 = 0,
968
969 FLD_C_r2 = 1,
970 FLD_C_b2 = 1,
971 FLD_C_i2 = 1,
972
973 FLD_C_r3 = 2,
974 FLD_C_m3 = 2,
975 FLD_C_i3 = 2,
976
977 FLD_C_m4 = 3,
978 FLD_C_b4 = 3,
979 FLD_C_i4 = 3,
980 FLD_C_l1 = 3,
981
982 FLD_C_i5 = 4,
983 FLD_C_d1 = 4,
984
985 FLD_C_d2 = 5,
986
987 FLD_C_d4 = 6,
988 FLD_C_x2 = 6,
989 FLD_C_l2 = 6,
990
991 NUM_C_FIELD = 7
992 };
993
struct DisasFields {
    unsigned op:8;              /* primary opcode byte */
    unsigned op2:8;             /* secondary opcode byte */
    unsigned presentC:16;       /* bitmap of filled compact (C) slots */
    unsigned int presentO;      /* bitmap of present original (O) fields */
    int c[NUM_C_FIELD];         /* decoded field values, indexed by C */
};
1001
1002 /* This is the way fields are to be accessed out of DisasFields. */
1003 #define have_field(S, F) have_field1((S), FLD_O_##F)
1004 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1005
/* True if original-index field C was decoded for this instruction.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}
1010
/* Return the decoded value of field O, stored in compact slot C.
   Asserts that the field is actually present in this format.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1017
1018 /* Describe the layout of each field in each format. */
1019 typedef struct DisasField {
1020 unsigned int beg:8;
1021 unsigned int size:8;
1022 unsigned int type:2;
1023 unsigned int indexC:6;
1024 enum DisasFieldIndexO indexO:8;
1025 } DisasField;
1026
1027 typedef struct DisasFormatInfo {
1028 DisasField op[NUM_C_FIELD];
1029 } DisasFormatInfo;
1030
1031 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1032 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1033 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1034 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1035 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1036 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1037 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1038 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1039 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1040 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1041 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1042 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1043 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1044 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1045
1046 #define F0(N) { { } },
1047 #define F1(N, X1) { { X1 } },
1048 #define F2(N, X1, X2) { { X1, X2 } },
1049 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1050 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1051 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1052
1053 static const DisasFormatInfo format_info[] = {
1054 #include "insn-format.def"
1055 };
1056
1057 #undef F0
1058 #undef F1
1059 #undef F2
1060 #undef F3
1061 #undef F4
1062 #undef F5
1063 #undef R
1064 #undef M
1065 #undef BD
1066 #undef BXD
1067 #undef BDL
1068 #undef BXDL
1069 #undef I
1070 #undef L
1071
1072 /* Generally, we'll extract operands into this structures, operate upon
1073 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1074 of routines below for more details. */
1075 typedef struct {
1076 bool g_out, g_out2, g_in1, g_in2;
1077 TCGv_i64 out, out2, in1, in2;
1078 TCGv_i64 addr1;
1079 } DisasOps;
1080
1081 /* Instructions can place constraints on their operands, raising specification
1082 exceptions if they are violated. To make this easy to automate, each "in1",
1083 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1084 of the following, or 0. To make this easy to document, we'll put the
1085 SPEC_<name> defines next to <name>. */
1086
1087 #define SPEC_r1_even 1
1088 #define SPEC_r2_even 2
1089 #define SPEC_r1_f128 4
1090 #define SPEC_r2_f128 8
1091
1092 /* Return values from translate_one, indicating the state of the TB. */
1093 typedef enum {
1094 /* Continue the TB. */
1095 NO_EXIT,
1096 /* We have emitted one or more goto_tb. No fixup required. */
1097 EXIT_GOTO_TB,
1098 /* We are not using a goto_tb (for whatever reason), but have updated
1099 the PC (for whatever reason), so there's no need to do it again on
1100 exiting the TB. */
1101 EXIT_PC_UPDATED,
1102 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1103 updated the PC for the next instruction to be executed. */
1104 EXIT_PC_STALE,
1105 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1106 No following code will be executed. */
1107 EXIT_NORETURN,
1108 } ExitStatus;
1109
/* Facility bits an instruction may require before it is available.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1132
/* One entry in the instruction decode table: opcode, format, required
   facility, operand constraints, and the set of helper callbacks that
   load operands, perform the operation, and write back results.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode (primary + secondary bytes) */
    DisasFormat fmt:6;      /* instruction format (FMT_*) */
    DisasFacility fac:6;    /* facility required for this insn */
    unsigned spec:4;        /* SPEC_* operand constraint bits */

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* extra per-insn data for the helpers */
};
1150
1151 /* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */
1153
/* Load the in2 operand for a shift: the shift count comes from
   base(B2) + D2, masked with MASK (e.g. 63 for 64-bit shifts).  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        /* No base register: the count is the immediate displacement.  */
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
1167
/* Emit an unconditional branch to DEST.  A branch to the next insn is
   a no-op; otherwise chain with goto_tb when allowed, else just set
   the PSW address and exit the TB.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1184
/* Emit code for a (possibly conditional) branch.  *C describes the taken
   condition; IS_IMM selects between a relative target (IMM halfwords from
   the insn address) and an indirect target in CDEST.  Uses goto_tb
   chaining for one or both exits where possible.  Consumes *C (calls
   free_compare) on every path.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so a single 64-bit
               movcond can select between the two destinations.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1309
1310 /* ====================================================================== */
1311 /* The operations. These perform the bulk of the work for any insn,
1312 usually after the operands have been loaded and output initialized. */
1313
/* Integer absolute value: out = |in2|, via helper.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Float absolute value: clear the IEEE sign bit of the 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Float absolute value: clear the IEEE sign bit of the 64-bit value.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* 128-bit float absolute value: the sign bit lives in the high
   doubleword (in1); the low doubleword is copied through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* Plain 64-bit addition; any CC computation is handled elsewhere
   (via the insn's cout hook).  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1344
/* ADD WITH CARRY: out = in1 + in2 + carry, where the incoming carry is
   recovered from the current (possibly lazy) condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: setcond in 32 bits, then widen to 64.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
1371
/* 32-bit BFP add, via softfloat helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP add, via softfloat helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP add: the helper takes both halves of each operand and
   returns the low half via the retxl mechanism.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Bitwise AND of the two operands.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1396
/* AND IMMEDIATE over a sub-field of the register: insn->data encodes the
   field as (size << 8) | shift.  The immediate in in2 is moved into
   position and all bits outside the field are forced to 1 so that only
   the selected field of in1 is affected.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place below, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1413
1414 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1415 {
1416 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1417 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1418 tcg_gen_mov_i64(psw_addr, o->in2);
1419 return EXIT_PC_UPDATED;
1420 } else {
1421 return NO_EXIT;
1422 }
1423 }
1424
/* BRANCH RELATIVE AND SAVE: save the link info, then branch to the
   pc-relative target (i2 halfwords from the insn address).  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* BRANCH ON CONDITION: build the comparison for mask m1, then hand the
   relative (i2) or indirect (in2) target to the common branch helper.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1441
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and branch
   if the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits, write back only the low 32.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1465
/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch if the
   result is non-zero.  The register itself is used as the comparison
   operand (g1 = true), so it must not be freed by free_compare.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1484
/* BRANCH ON INDEX (32-bit, BXH/BXLE): r1 += r3, then compare the new
   (32-bit) value against the comparand in r3|1.  insn->data selects
   LE (branch on low-or-equal) vs GT (branch on high).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    /* Note: the comparand is truncated before r1 is stored, so the
       r1 == (r3|1) case still compares against the pre-store value.  */
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1510
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): r1 += r3, then compare against
   the comparand in r3|1.  insn->data selects LE vs GT, as in op_bx32.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 aliases the comparand register, snapshot the comparand
       before the addition below clobbers it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1536
/* COMPARE AND BRANCH (CRJ et al.): compare in1 with in2 using the
   relation selected by m3; insn->data non-zero selects the unsigned
   (logical) variants.  The target is either pc-relative (i4) or the
   address b4+d4.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    /* Compare the operand temps directly; they are owned elsewhere.  */
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1562
/* 32-bit BFP compare; CC comes back from the helper.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* 64-bit BFP compare; CC comes back from the helper.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* 128-bit BFP compare (both halves of each operand); CC from helper.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1583
/* Float -> signed integer conversions.  m3 carries the rounding mode
   field; the CC is derived from the source value afterward.  */

/* 32-bit BFP -> 32-bit signed int.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP -> 32-bit signed int.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP -> 32-bit signed int.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* 32-bit BFP -> 64-bit signed int.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP -> 64-bit signed int.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP -> 64-bit signed int.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1637
/* Float -> unsigned (logical) integer conversions; same shape as the
   signed variants above.  */

/* 32-bit BFP -> 32-bit unsigned int.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP -> 32-bit unsigned int.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP -> 32-bit unsigned int.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* 32-bit BFP -> 64-bit unsigned int.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP -> 64-bit unsigned int.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP -> 64-bit unsigned int.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1691
/* Integer -> float conversions; m3 carries the rounding mode field.
   No CC is produced by these.  */

/* 64-bit signed int -> 32-bit BFP.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* 64-bit signed int -> 64-bit BFP.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* 64-bit signed int -> 128-bit BFP; low half via return_low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

/* 64-bit unsigned int -> 32-bit BFP.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* 64-bit unsigned int -> 64-bit BFP.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* 64-bit unsigned int -> 128-bit BFP; low half via return_low128.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1741
/* CHECKSUM: the helper computes the checksum and the number of bytes
   processed; afterwards advance the address register (r2) and decrement
   the length register (r2+1) by that count.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1758
/* COMPARE LOGICAL (character): for operand lengths of 1, 2, 4 or 8 bytes
   emit two inline loads and compute the CC from the pair; any other
   length goes through the byte-wise helper.  Note the L field encodes
   length minus one, hence the switch on l + 1.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Awkward length: fall back to the helper, which sets the CC.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1792
/* COMPARE LOGICAL LONG EXTENDED, via helper; CC from helper.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE LOGICAL CHARACTERS UNDER MASK: the register operand is
   truncated to 32 bits for the helper; CC from helper.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* COMPARE LOGICAL STRING: the helper returns the updated first-operand
   address in in1 and the second via the low-128 mechanism into in2.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1826
1827 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1828 {
1829 TCGv_i64 t = tcg_temp_new_i64();
1830 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1831 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1832 tcg_gen_or_i64(o->out, o->out, t);
1833 tcg_temp_free_i64(t);
1834 return NO_EXIT;
1835 }
1836
/* COMPARE AND SWAP (32-bit), via helper; CC from helper.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE AND SWAP (64-bit), via helper; CC from helper.  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
1854
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged), via helper; CC from helper.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
1866
/* COMPARE DOUBLE AND SWAP (32-bit pair): build a 64-bit value from the
   even/odd register pair r3/r3+1 and reuse the 64-bit CSG helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE DOUBLE AND SWAP (64-bit pair), entirely in the helper.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
1889
/* CONVERT TO DECIMAL: helper converts the (truncated 32-bit) value to
   the decimal format, which is then stored at the address in in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1901
/* COMPARE AND TRAP: if the m3-selected relation does NOT hold, record a
   data exception (DXC 0xff in the FPC) and raise PGM_DATA; otherwise
   fall through.  insn->data non-zero selects the unsigned variant.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    /* Invert: branch AROUND the trap when the comparison holds.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap.  */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1928
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): dispatch on the function code in D2.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
#endif
1944
/* Division ops.  The helpers produce two results: one written directly
   (out2) and a second retrieved with return_low128 into out.  */

/* 32-bit signed divide.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 32-bit unsigned divide.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 64-bit signed divide.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 128/64-bit unsigned divide: dividend is the out/out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 32-bit BFP divide.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP divide.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP divide; low half via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1991
/* EXTRACT ACCESS REGISTER: read access register r2 from the CPU state.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT FPC: read the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2004
/* EXECUTE: run the target instruction (with the modification byte from
   in1 applied) entirely inside a helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    /* The helper needs an up-to-date PSW address and CC state.  */
    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2030
/* FIND LEFTMOST ONE: R1 gets the bit number of the leftmost one bit
   (64 when the input is zero), R1+1 the input with that bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2050
2051 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2052 {
2053 int m3 = get_field(s->fields, m3);
2054 int pos, len, base = s->insn->data;
2055 TCGv_i64 tmp = tcg_temp_new_i64();
2056 uint64_t ccm;
2057
2058 switch (m3) {
2059 case 0xf:
2060 /* Effectively a 32-bit load. */
2061 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2062 len = 32;
2063 goto one_insert;
2064
2065 case 0xc:
2066 case 0x6:
2067 case 0x3:
2068 /* Effectively a 16-bit load. */
2069 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2070 len = 16;
2071 goto one_insert;
2072
2073 case 0x8:
2074 case 0x4:
2075 case 0x2:
2076 case 0x1:
2077 /* Effectively an 8-bit load. */
2078 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2079 len = 8;
2080 goto one_insert;
2081
2082 one_insert:
2083 pos = base + ctz32(m3) * 8;
2084 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2085 ccm = ((1ull << len) - 1) << pos;
2086 break;
2087
2088 default:
2089 /* This is going to be a sequence of loads and inserts. */
2090 pos = base + 32 - 8;
2091 ccm = 0;
2092 while (m3) {
2093 if (m3 & 0x8) {
2094 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2095 tcg_gen_addi_i64(o->in2, o->in2, 1);
2096 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2097 ccm |= 0xff << pos;
2098 }
2099 m3 = (m3 << 1) & 0xf;
2100 pos -= 8;
2101 }
2102 break;
2103 }
2104
2105 tcg_gen_movi_i64(tmp, ccm);
2106 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2107 tcg_temp_free_i64(tmp);
2108 return NO_EXIT;
2109 }
2110
/* Insert in2 into a sub-field of in1; insn->data encodes the field as
   (size << 8) | shift, as in op_andi.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2118
/* INSERT PROGRAM MASK: place the program mask (extracted from psw_mask)
   and the condition code into byte 3 (bits 24-31) of the register.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    /* Materialize the CC value before reading cc_op below.  */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* (psw_mask << 20) >> 36 lands the program mask at bits 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* CC (0..3) shifted to bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2137
#ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged), via helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED (privileged), via helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
2153
/* Float format conversions (lengthen/round), via softfloat helpers.  */

/* 32-bit BFP -> 64-bit BFP (lengthen).  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP -> 32-bit BFP (round).  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP -> 64-bit BFP (round).  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP -> 32-bit BFP (round).  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP -> 128-bit BFP (lengthen); low half via return_low128.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* 32-bit BFP -> 128-bit BFP (lengthen); low half via return_low128.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LOGICAL THIRTY ONE BITS: mask to the low 31 bits.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2197
/* Memory loads of various widths and extensions; address in in2.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2239
/* LOAD ON CONDITION: out = condition(m3) ? in2 : in1, without a branch,
   using movcond.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize the result as 0/1, widen it,
           and select with a 64-bit movcond against zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2269
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged): load control registers r1..r3.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit, privileged).  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS (privileged); CC from helper.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PSW (privileged): load the short (ESA) PSW format from memory
   and install it; never returns to this TB.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED (privileged): load the 16-byte PSW and install it;
   never returns to this TB.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
2339
/* LOAD ACCESS MULTIPLE: helper loads access registers r1 through r3
   from memory at o->in2; may fault.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2350
2351 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2352 {
2353 int r1 = get_field(s->fields, r1);
2354 int r3 = get_field(s->fields, r3);
2355 TCGv_i64 t = tcg_temp_new_i64();
2356 TCGv_i64 t4 = tcg_const_i64(4);
2357
2358 while (1) {
2359 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2360 store_reg32_i64(r1, t);
2361 if (r1 == r3) {
2362 break;
2363 }
2364 tcg_gen_add_i64(o->in2, o->in2, t4);
2365 r1 = (r1 + 1) & 15;
2366 }
2367
2368 tcg_temp_free_i64(t);
2369 tcg_temp_free_i64(t4);
2370 return NO_EXIT;
2371 }
2372
/* LOAD MULTIPLE HIGH: like op_lm32, but each loaded word is stored
   into the high half of the target register.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        /* Advance the address and wrap the register number at 15.  */
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2394
/* LOAD MULTIPLE (64-bit): load registers r1 through r3 (wrapping at
   15) directly from consecutive doublewords at o->in2.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2413
/* Generic move: transfer ownership of the in2 temporary to out, so the
   common epilogue writes it back.  Clearing in2 (and its "global" flag)
   prevents the epilogue from freeing the same temp twice.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2422
/* Generic 128-bit move: transfer ownership of the in1/in2 pair to
   out/out2, clearing the inputs so they are not double-freed.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2434
/* MOVE (character): helper copies l1+1 bytes from o->in2 to o->addr1;
   may fault.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
2443
/* MOVE LONG: helper performs the full register-pair-described move and
   returns the condition code.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
2455
/* MOVE LONG EXTENDED: helper variant of MVCL taking the pad byte via
   o->in2; returns the condition code.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2467
2468 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged; note the key/length operand comes from
   the register named by the L1 field.  CC from the helper.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2478
/* MOVE TO SECONDARY: privileged counterpart of op_mvcp in the other
   direction; CC from the helper.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2488 #endif
2489
/* MOVE PAGE: helper copies one page; register 0 carries the option
   bits.  CC from the helper.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2497
/* MOVE STRING: helper copies up to the terminator byte in register 0;
   returns the updated r1 address in o->in1 and the updated r2 address
   via the low-128 return slot.  CC from the helper.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2506
/* MULTIPLY: low 64 bits of the product; CC is not changed here.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2512
/* MULTIPLY (128-bit product): helper computes the full product; the
   low half is retrieved through the 128-bit return slot.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2519
/* MULTIPLY (short BFP): helper performs the float32 multiply.  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2525
/* MULTIPLY (short BFP to long): helper widens and multiplies.  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2531
/* MULTIPLY (long BFP): helper performs the float64 multiply.  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2537
/* MULTIPLY (extended BFP): 128-bit operands are passed as the out/out2
   and in1/in2 pairs; low half of the result via the 128-bit slot.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2544
/* MULTIPLY (long BFP to extended): helper widens the operands and
   produces a 128-bit result.  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2551
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f(r3).  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
2559
/* MULTIPLY AND ADD (long BFP): out = in1 * in2 + f(r3).  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2566
/* MULTIPLY AND SUBTRACT (short BFP): out = in1 * in2 - f(r3).  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
2574
/* MULTIPLY AND SUBTRACT (long BFP): out = in1 * in2 - f(r3).  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2581
/* LOAD NEGATIVE (integer): helper computes -|in2|.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}
2587
/* LOAD NEGATIVE (short BFP): force the float32 sign bit on.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
2593
/* LOAD NEGATIVE (long BFP): force the float64 sign bit on.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
2599
/* LOAD NEGATIVE (extended BFP): force the sign bit of the high half
   on; the low half is copied unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2606
/* AND (character): helper ANDs l1+1 bytes at o->in2 into o->addr1;
   CC from the helper.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2616
/* LOAD COMPLEMENT (integer): out = -in2.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}
2622
/* LOAD COMPLEMENT (short BFP): flip the float32 sign bit.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
2628
/* LOAD COMPLEMENT (long BFP): flip the float64 sign bit.  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
2634
/* LOAD COMPLEMENT (extended BFP): flip the sign bit of the high half;
   copy the low half unchanged.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2641
/* OR (character): helper ORs l1+1 bytes at o->in2 into o->addr1;
   CC from the helper.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2651
/* OR: out = in1 | in2; CC handled by the instruction table.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2657
/* OR IMMEDIATE (OIxx): insn->data encodes the field position (low
   byte) and width (high byte) of the immediate within the register.
   The immediate in o->in2 is shifted into place, ORed in, and the CC
   is computed from just the affected field.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is clobbered below, so it must not be a global register.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2673
/* POPULATION COUNT: helper computes the per-byte population count.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
2679
2680 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; helper flushes the translation buffer.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2687 #endif
2688
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate R2
   left by I5, then insert the bits selected by I3..I4 into R1.  The
   high/low variants operate only on one 32-bit half of R1 (PMASK).
   Bit 0x80 of I4 requests that the unselected bits be zeroed rather
   than preserved.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1. In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
2771
/* ROTATE THEN <AND|OR|XOR> SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate
   R2 left by I5, combine the bits selected by I3..I4 into R1 with the
   boolean op chosen by op2, and set the CC from the selected bits.
   Bit 0x80 of I3 selects the test-only form (result discarded).  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        /* Setting the unselected bits of in2 makes them identity
           under AND, so out is modified only in the selected bits.  */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2824
/* Byte-swap the low 16 bits of in2 into out.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}
2830
/* Byte-swap the low 32 bits of in2 into out.  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}
2836
/* Byte-swap all 64 bits of in2 into out.  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2842
2843 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2844 {
2845 TCGv_i32 t1 = tcg_temp_new_i32();
2846 TCGv_i32 t2 = tcg_temp_new_i32();
2847 TCGv_i32 to = tcg_temp_new_i32();
2848 tcg_gen_trunc_i64_i32(t1, o->in1);
2849 tcg_gen_trunc_i64_i32(t2, o->in2);
2850 tcg_gen_rotl_i32(to, t1, t2);
2851 tcg_gen_extu_i32_i64(o->out, to);
2852 tcg_temp_free_i32(t1);
2853 tcg_temp_free_i32(t2);
2854 tcg_temp_free_i32(to);
2855 return NO_EXIT;
2856 }
2857
/* 64-bit rotate left: out = in1 rotated left by in2.  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2863
2864 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; CC from the helper.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2872
/* SET ADDRESS SPACE CONTROL FAST: privileged.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
2880 #endif
2881
/* SET ACCESS: store the low 32 bits of in2 into access register r1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
2888
/* SUBTRACT (short BFP): helper performs the float32 subtract.  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2894
/* SUBTRACT (long BFP): helper performs the float64 subtract.  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2900
/* SUBTRACT (extended BFP): 128-bit operands via the out/out2 and
   in1/in2 pairs; low half of the result via the 128-bit slot.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2907
/* SQUARE ROOT (short BFP).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2913
/* SQUARE ROOT (long BFP).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2919
/* SQUARE ROOT (extended BFP): 128-bit input via in1/in2; low half of
   the result via the 128-bit slot.  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2926
2927 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; CC from the helper.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}
2936
/* SIGNAL PROCESSOR: privileged; helper performs the order and writes
   the condition code into cc_op directly.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2946 #endif
2947
2948 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
2949 {
2950 DisasCompare c;
2951 TCGv_i64 a;
2952 int lab, r1;
2953
2954 disas_jcc(s, &c, get_field(s->fields, m3));
2955
2956 lab = gen_new_label();
2957 if (c.is_64) {
2958 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
2959 } else {
2960 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
2961 }
2962 free_compare(&c);
2963
2964 r1 = get_field(s->fields, r1);
2965 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2966 if (s->insn->data) {
2967 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
2968 } else {
2969 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
2970 }
2971 tcg_temp_free_i64(a);
2972
2973 gen_set_label(lab);
2974 return NO_EXIT;
2975 }
2976
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit index
   (31 or 63).  The CC is computed from the pre-shift operands.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit. Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
2990
/* SHIFT LEFT SINGLE LOGICAL: out = in1 << in2.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2996
/* SHIFT RIGHT SINGLE (arithmetic): out = in1 >> in2, sign-filling.  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3002
/* SHIFT RIGHT SINGLE LOGICAL: out = in1 >> in2, zero-filling.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3008
/* SET FPC: install in2 as the floating-point control register.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
3014
/* SET FPC AND SIGNAL: as op_sfpc, but the helper may also raise a
   simulated IEEE exception.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3020
3021 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3022 {
3023 int b2 = get_field(s->fields, b2);
3024 int d2 = get_field(s->fields, d2);
3025 TCGv_i64 t1 = tcg_temp_new_i64();
3026 TCGv_i64 t2 = tcg_temp_new_i64();
3027 int mask, pos, len;
3028
3029 switch (s->fields->op2) {
3030 case 0x99: /* SRNM */
3031 pos = 0, len = 2;
3032 break;
3033 case 0xb8: /* SRNMB */
3034 pos = 0, len = 3;
3035 break;
3036 case 0xb9: /* SRNMT */
3037 pos = 4, len = 3;
3038 default:
3039 tcg_abort();
3040 }
3041 mask = (1 << len) - 1;
3042
3043 /* Insert the value into the appropriate field of the FPC. */
3044 if (b2 == 0) {
3045 tcg_gen_movi_i64(t1, d2 & mask);
3046 } else {
3047 tcg_gen_addi_i64(t1, regs[b2], d2);
3048 tcg_gen_andi_i64(t1, t1, mask);
3049 }
3050 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3051 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3052 tcg_temp_free_i64(t1);
3053
3054 /* Then install the new FPC to set the rounding mode in fpu_status. */
3055 gen_helper_sfpc(cpu_env, t2);
3056 tcg_temp_free_i64(t2);
3057 return NO_EXIT;
3058 }
3059
3060 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; deposit bits 4-7 of in2 into
   the PSW key field.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}
3068
/* SET STORAGE KEY EXTENDED: privileged; helper sets the storage key.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
3075
/* SET SYSTEM MASK: privileged; replace the top byte of the PSW mask
   with the byte loaded into in2.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3082
/* STORE CPU ADDRESS: privileged; uses cpu_num as the CPU address.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number. In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3092
/* STORE CLOCK: helper reads the TOD clock; CC forced to 0 since clock
   states are not modelled.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3100
3101 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3102 {
3103 TCGv_i64 c1 = tcg_temp_new_i64();
3104 TCGv_i64 c2 = tcg_temp_new_i64();
3105 gen_helper_stck(c1, cpu_env);
3106 /* Shift the 64-bit value into its place as a zero-extended
3107 104-bit value. Note that "bit positions 64-103 are always
3108 non-zero so that they compare differently to STCK"; we set
3109 the least significant bit to 1. */
3110 tcg_gen_shli_i64(c2, c1, 56);
3111 tcg_gen_shri_i64(c1, c1, 8);
3112 tcg_gen_ori_i64(c2, c2, 0x10000);
3113 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3114 tcg_gen_addi_i64(o->in2, o->in2, 8);
3115 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3116 tcg_temp_free_i64(c1);
3117 tcg_temp_free_i64(c2);
3118 /* ??? We don't implement clock states. */
3119 gen_op_movi_cc(s, 0);
3120 return NO_EXIT;
3121 }
3122
/* SET CLOCK COMPARATOR: privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}
3129
/* STORE CLOCK COMPARATOR: privileged.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
3136
/* STORE CONTROL (64-bit): privileged; helper stores control registers
   r1 through r3 as doublewords at o->in2.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3148
/* STORE CONTROL (32-bit): privileged; helper stores control registers
   r1 through r3 as words at o->in2.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3160
/* STORE CPU ID: privileged; uses cpu_num as the identification --
   NOTE(review): a placeholder, not a full CPU-ID doubleword.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3167
/* SET CPU TIMER: privileged.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
3174
/* STORE FACILITY LIST: privileged; store a hard-coded facility word
   at real address 200 (the architected STFL location).  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement. Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}
3188
/* STORE CPU TIMER: privileged.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
3195
/* STORE SYSTEM INFORMATION: privileged; function code and selectors
   come from registers 0 and 1.  CC from the helper.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
3204
/* SET PREFIX: privileged.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
3211
/* Stub for the subchannel I/O instructions: privileged; always report
   CC 3 (not operational).  */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational. */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}
3219
/* STORE PREFIX: privileged; read the prefix register, masked to its
   architecturally valid bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
3227
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac / STOSM): privileged;
   store the current system-mask byte, then AND or OR the immediate
   into the top byte of the PSW mask.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into bits 0-7 of the PSW mask.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into bits 0-7 of the PSW mask.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3251
/* STORE USING REAL ADDRESS: privileged; helper bypasses DAT.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3259 #endif
3260
/* Store the low byte of in1 at the address in in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3266
/* Store the low halfword of in1 at the address in in2.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3272
/* Store the low word of in1 at the address in in2.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3278
/* Store the doubleword in1 at the address in in2.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3284
/* STORE ACCESS MULTIPLE: helper stores access registers r1 through r3
   at o->in2; may fault.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3295
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by M3
   to successive locations at o->in2.  insn->data is the bit position
   of the source field (to distinguish the high/low/yonder forms).
   Contiguous masks become a single wider store; otherwise emit one
   byte store per set mask bit.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the lowest selected byte within in1.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3344
/* STORE MULTIPLE: store registers r1 through r3 (wrapping at 15) to
   consecutive locations at o->in2.  insn->data is the element size
   (4 or 8 bytes).  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3368
/* STORE MULTIPLE HIGH: store the high halves of registers r1 through
   r3 (wrapping at 15) as words at o->in2.  */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* Move the high half down so the 32-bit store picks it up.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
3392
/* SEARCH STRING: helper scans for the byte in register 0; updated r1
   returned in o->in1, updated r2 via the 128-bit slot.  CC from the
   helper.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3401
/* SUBTRACT: out = in1 - in2; CC handled by the instruction table.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3407
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where the borrow is
   recovered from the previous CC via disas_jcc rather than forcing
   the CC to be fully materialized.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC. Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit compare: compute the flag in 32 bits, then widen.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
3433
/* SUPERVISOR CALL: record the SVC code and instruction length in the
   CPU state, then raise the SVC exception.  The PSW address and CC
   must be synced first since the exception leaves the TB.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3452
/* TEST DATA CLASS (short BFP): CC from the helper.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3459
/* TEST DATA CLASS (long BFP): CC from the helper.  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3466
/* TEST DATA CLASS (extended BFP): 128-bit operand in out/out2; CC
   from the helper.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3473
3474 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: CC from the helper.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3482 #endif
3483
/* TRANSLATE: helper translates l1+1 bytes at o->addr1 through the
   table at o->in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3493
/* UNPACK: helper expands the packed operand at o->in2 into o->addr1.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3502
/* EXCLUSIVE OR (character): memory-to-memory XOR of l1+1 bytes.
   When both operands name the same location, XOR with itself zeroes the
   field, so we emit inline zero stores instead of calling the helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.
       Only inline up to 32 bytes; beyond that the helper is cheaper.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* The l1 field encodes length minus one.  */
        l++;
        /* Store in descending power-of-two chunks, bumping the address
           between stores only while bytes remain.  */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* Result is all zero, so CC is 0.  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
3556
/* 64-bit XOR; the CC update, if any, comes from the insn's cout hook.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3562
3563 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3564 {
3565 int shift = s->insn->data & 0xff;
3566 int size = s->insn->data >> 8;
3567 uint64_t mask = ((1ull << size) - 1) << shift;
3568
3569 assert(!o->g_in2);
3570 tcg_gen_shli_i64(o->in2, o->in2, shift);
3571 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3572
3573 /* Produce the CC from only the bits manipulated. */
3574 tcg_gen_andi_i64(cc_dst, o->out, mask);
3575 set_cc_nz_u64(s, cc_dst);
3576 return NO_EXIT;
3577 }
3578
/* Produce a constant-zero output operand.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Zero a pair of outputs.  out2 aliases out; g_out2 is set so the
   cleanup in translate_one does not free the same temp twice.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3592
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  Each merely records operands
   and a CC_OP tag; the actual CC value is computed lazily elsewhere.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* Add/subtract with carry/borrow variants: same operand recording, but
   the CC_OP tag tells the lazy evaluator to account for the carry-in.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* NZ on the low 32 bits only: zero-extend before the generic NZ test.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* TEST UNDER MASK: in1 is the value, in2 the mask.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3753
/* ====================================================================== */
/* The "PREPeration" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  The SPEC_* define attached to
   each generator lists the specification-exception checks it requires.  */

static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* A pair of fresh temporaries, for 128-bit results.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into the r1 GPR global; g_out prevents freeing it.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Even/odd GPR pair rooted at r1; requires r1 even.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* float128 register pair f[r1]:f[r1+2]; r1 must be a valid f128 base.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3804
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store two 32-bit results into the even/odd pair r1, r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the pair: low half to r1+1, high to r1.
   Note this shifts o->out in place; out must be a private temp here.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

/* float128 result into the pair f[r1]:f[r1+2].  NOTE(review): reads
   s->fields rather than the f parameter; equivalent, since translate_one
   sets s->fields = &f, but inconsistent with the sibling generators.  */
static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Conditional store: skip entirely when r1 == r2 (result would be a
   self-copy).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Stores to memory at addr1, in the various widths.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store to the address held in in2 (second-operand address).  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0
3919
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.
   Suffix conventions: _o = use the global in place (no copy); _32s/_32u =
   sign/zero extend from 32 bits; _m1_* = load from the first-operand
   address in memory.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd half of the even/odd pair rooted at r1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value assembled from the 32-bit halves in r1 (high) and
   r1+1 (low).  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* float128 first operand.  Deliberately fills out/out2 rather than
   in1/in2: f128 operations (e.g. op_tcxb) consume the 128-bit first
   operand from the out pair, leaving in2 free for a second operand.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Effective address of the first operand (base b1 + displacement d1).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Second-operand style address (x2 + b2 + d2) placed in addr1.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4107
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.
   Same suffix conventions as the in1 generators; additionally _a2/_ra2
   produce addresses, _m2_*/_mri2_* load from memory, and _i2* materialize
   immediates.  */

static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when non-zero; r2 == 0 leaves in2 unset (register 0 means
   "no operand" for the insns using this generator).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* float128 second operand occupies both in1 and in2.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Address formed from register r2 as base, no index/displacement.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Standard x2 + b2 + d2 effective address.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: i2 is a signed halfword offset from this insn.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift amounts, masked to the operand width.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

/* Memory loads via the a2 address; in2 is reused as both address and
   loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* Memory loads via the PC-relative (ri2) address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Immediate operands; the casts select the zero-extension width.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* Immediate shifted left by insn->data bits (for *HIGH immediates).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4362
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.
   insn-data.def is included three times below with different expansions
   of the D() X-macro: once for the enum, once for the info table, and
   once for the opcode switch.  */

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion: an enum constant per insn, used to index insn_info.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: the DisasInsn descriptor for each insn, wiring the
   in1/in2/prep/wout/cout/op helpers and or-ing their SPEC_ flags.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: one switch case per opcode, returning its entry.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4427
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principals of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* Size 0 marks an unused field slot in the format description.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        /* Classic sign-extension via xor/subtract of the sign bit.  */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* dh (low byte, signed) becomes bits 19:12; dl moves down.
           NOTE(review): left-shifting the negative int8_t relies on the
           usual implementation-defined/UB-tolerant compiler behavior.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4467
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes determine the major opcode and thus the
       instruction length.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Re-read the full insn and left-align it in the 64-bit word, as
       extract_field expects.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* SS formats have no secondary opcode.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4556
/* Translate a single guest instruction: decode, check specification
   exceptions, then run the insn's generator pipeline
   (in1 -> in2 -> prep -> op -> wout -> cout) and free the temporaries
   the generators created.  Returns the op's exit status.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions.  The SPEC_ flags were
       accumulated from the insn's operand generators.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* Valid float128 register pairs use bases 0..13 (r and r+2).  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; cout runs
       last so it can observe both the inputs and the final output.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       fields that alias TCG globals and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
4660
/* Translate a block of guest instructions starting at tb->pc into TCG
   ops for this TranslationBlock.  If search_pc is nonzero, also record
   per-op guest-PC / cc_op / icount entries in the gen_opc_* tables so
   that restore_state_to_opc() can later map a host PC back to guest
   state.  Translation stops at a page boundary, when the op buffer or
   the icount budget is exhausted, at a breakpoint, or when an
   instruction forces an exit.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    /* Start with an unknown condition-code computation; individual
       insns refine this as they are translated.  */
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record guest state for every op slot emitted so far;
               pad skipped slots with instr_start = 0.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        /* For icount mode, bracket the final (possibly I/O) insn with
           gen_io_start/gen_io_end.  */
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* Stop before a breakpointed insn and arrange for a debug
           exception instead of translating it.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* Exit path already fully generated by the insn itself.  */
        break;
    case EXIT_PC_STALE:
        /* psw.addr in env is behind dc.pc; write it back first.  */
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return. */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Pad the remainder of the search tables.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4794
4795 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4796 {
4797 gen_intermediate_code_internal(env, tb, 0);
4798 }
4799
4800 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4801 {
4802 gen_intermediate_code_internal(env, tb, 1);
4803 }
4804
4805 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4806 {
4807 int cc_op;
4808 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4809 cc_op = gen_opc_cc_op[pc_pos];
4810 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4811 env->cc_op = cc_op;
4812 }
4813 }