]> git.proxmox.com Git - mirror_qemu.git/blob - target-s390x/translate.c
target-s390: Optimize get_address
[mirror_qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* LOG_DISAS forwards to qemu_log() only when verbose disassembly
   debugging is compiled in; otherwise it expands to a no-op.  */
#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
/* global register indexes */
/* TCG pointer to the CPU environment (CPUS390XState); initialized in
   s390x_translate_init() from TCG_AREG0.  */
static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;    /* TB currently being translated */
    const DisasInsn *insn;          /* decode table entry for current insn */
    DisasFields *fields;            /* decoded operand fields of current insn */
    uint64_t pc, next_pc;           /* address of current / following insn */
    enum cc_op cc_op;               /* tracked condition-code computation */
    bool singlestep_enabled;
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;                 /* TCG comparison to perform */
    bool is_64;                     /* selects the s64 vs s32 union arm */
    bool g1;                        /* a operand is a global; do not free */
    bool g2;                        /* b operand is a global; do not free */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#define DISAS_EXCP 4

/* Per-cc_op counters of branches translated inline vs. via the generic
   cc helper; dumped from cpu_dump_state() when enabled.  */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
78
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
80 {
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
84 }
85 }
86 return pc;
87 }
88
89 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
90 int flags)
91 {
92 int i;
93
94 if (env->cc_op > 3) {
95 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
96 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
97 } else {
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
99 env->psw.mask, env->psw.addr, env->cc_op);
100 }
101
102 for (i = 0; i < 16; i++) {
103 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
104 if ((i % 4) == 3) {
105 cpu_fprintf(f, "\n");
106 } else {
107 cpu_fprintf(f, " ");
108 }
109 }
110
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
113 if ((i % 4) == 3) {
114 cpu_fprintf(f, "\n");
115 } else {
116 cpu_fprintf(f, " ");
117 }
118 }
119
120 #ifndef CONFIG_USER_ONLY
121 for (i = 0; i < 16; i++) {
122 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
123 if ((i % 4) == 3) {
124 cpu_fprintf(f, "\n");
125 } else {
126 cpu_fprintf(f, " ");
127 }
128 }
129 #endif
130
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i = 0; i < CC_OP_MAX; i++) {
133 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
134 inline_branch_miss[i], inline_branch_hit[i]);
135 }
136 #endif
137
138 cpu_fprintf(f, "\n");
139 }
140
/* TCG globals backed by fields of CPUS390XState (created in
   s390x_translate_init).  */
static TCGv_i64 psw_addr;           /* env->psw.addr */
static TCGv_i64 psw_mask;           /* env->psw.mask */

/* Condition-code state: cc_op names the pending computation, the
   src/dst/vr values are its inputs.  */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Names "r0".."r15" then "f0".."f15" for the register globals below.  */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];           /* general registers */
static TCGv_i64 fregs[16];          /* floating point registers */

/* Per-opcode snapshot of cc_op; presumably consulted when restoring
   state from a TB — TODO confirm against the retranslation code.  */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
154
/* Create all fixed TCG globals (env pointer, PSW, cc state, and the 16
   general and 16 floating point registers) backed by CPUS390XState
   fields, and register the helper functions.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* General registers r0..r15.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* Floating point registers f0..f15; names stored after the r-names.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
194
195 static TCGv_i64 load_reg(int reg)
196 {
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
199 return r;
200 }
201
202 static TCGv_i64 load_freg32_i64(int reg)
203 {
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
206 return r;
207 }
208
/* Copy all 64 bits of V into general register REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
213
/* Copy all 64 bits of V into float register REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
218
/* Store the low 32 bits of V into general register REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
224
/* Store the low 32 bits of V into bits 32..63 of general register REG;
   the low half of the register is preserved.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
229
/* Store the low 32 bits of V into the upper half of float register REG
   (where load_freg32_i64 reads from); the low half is preserved.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
234
/* Fetch env->retxl into DEST — the second (low) half of a 128-bit
   result that a helper returned out of band.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
239
/* Write the current translation PC back into psw.addr.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
245
/* Materialize the tracked cc computation kind into the cc_op global.
   Skipped for DYNAMIC (env already holds it) and STATIC (cc_op already
   holds the final cc value).  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
252
/* Sync psw.addr and cc_op to the CPU state; emitted before operations
   that may fault so the exception path sees consistent state.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
258
/* Fetch 2 bytes of instruction text at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
263
/* Fetch 4 bytes of instruction text at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
268
/* Fetch a 6-byte instruction: the first halfword goes into bits 32..47,
   the remaining 4 bytes into the low 32 bits.  */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
273
274 static int get_mem_index(DisasContext *s)
275 {
276 switch (s->tb->flags & FLAG_MASK_ASC) {
277 case PSW_ASC_PRIMARY >> 32:
278 return 0;
279 case PSW_ASC_SECONDARY >> 32:
280 return 1;
281 case PSW_ASC_HOME >> 32:
282 return 2;
283 default:
284 tcg_abort();
285 break;
286 }
287 }
288
/* Emit a call to the exception helper with exception number EXCP.  */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
295
/* Emit code raising program exception CODE: record the code and the
   instruction length in env, advance the PSW past the instruction,
   sync the cc state, and raise EXCP_PGM.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
319
/* Raise a specification program exception for an illegal opcode.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}
324
/* Raise a privileged-operation exception if the TB was translated in
   problem state (PSW PSTATE bit set in the TB flags).  */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
331
332 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
333 {
334 TCGv_i64 tmp = tcg_temp_new_i64();
335 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
336
337 /* Note that d2 is limited to 20 bits, signed. If we crop negative
338 displacements early we create larger immedate addends. */
339
340 /* Note that addi optimizes the imm==0 case. */
341 if (b2 && x2) {
342 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
343 tcg_gen_addi_i64(tmp, tmp, d2);
344 } else if (b2) {
345 tcg_gen_addi_i64(tmp, regs[b2], d2);
346 } else if (x2) {
347 tcg_gen_addi_i64(tmp, regs[x2], d2);
348 } else {
349 if (need_31) {
350 d2 &= 0x7fffffff;
351 need_31 = false;
352 }
353 tcg_gen_movi_i64(tmp, d2);
354 }
355 if (need_31) {
356 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
357 }
358
359 return tmp;
360 }
361
362 static inline bool live_cc_data(DisasContext *s)
363 {
364 return (s->cc_op != CC_OP_DYNAMIC
365 && s->cc_op != CC_OP_STATIC
366 && s->cc_op > 3);
367 }
368
/* Set the condition code to the constant VAL (0..3), discarding any
   stale cc computation inputs.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
378
/* Record a one-input cc computation OP with input DST; unused cc
   globals are discarded.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
388
/* Record a two-input cc computation OP with inputs SRC and DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
399
/* Record a three-input cc computation OP with inputs SRC, DST and VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
408
/* Set the cc from the zero/non-zero status of the 64-bit VAL.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
413
/* Set the cc from a 32-bit float result VAL (CC_OP_NZ_F32).  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
418
/* Set the cc from a 64-bit float result VAL (CC_OP_NZ_F64).  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
423
/* Set the cc from a 128-bit float result given as high/low halves.  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
428
/* CC value is in env->cc_op */
/* Mark the cc as fully computed and resident in env->cc_op, discarding
   any stale computation inputs.  */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
439
/* Calculate the pending condition code into the cc_op global,
   converting the tracked computation (s->cc_op) into a concrete 0..3
   value, then mark the cc as static.  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First pass: materialize only the helper arguments this cc_op
       actually needs (the op number, and a dummy filler operand).  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    /* Second pass: emit the computation itself.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
547
/* Return true if a chained goto_tb to DEST is allowed: DEST must lie on
   one of the pages this TB touches, and neither single-stepping nor an
   I/O-ending TB (CF_LAST_IO) may be active.  */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}
556
/* Count a branch that had to go through the generic cc helper
   (statistics only; compiled out unless DEBUG_INLINE_BRANCHES).  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
563
/* Count a branch that was translated as an inline comparison
   (statistics only; compiled out unless DEBUG_INLINE_BRANCHES).  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
570
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because mask bit 0 (cc=3) is ignored.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
583
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  Indexed by the
   4-bit branch mask; mask bits for cc=2 and cc=3 are don't-cares.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
596
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the tracked
   cc computation allows it, the branch mask is folded directly into a
   comparison on the cc inputs; otherwise the cc is materialized first
   and the comparison is done on the static cc value.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial cases: branch always / branch never.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already computed; compare it against MASK.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
887
888 static void free_compare(DisasCompare *c)
889 {
890 if (!c->g1) {
891 if (c->is_64) {
892 tcg_temp_free_i64(c->u.s64.a);
893 } else {
894 tcg_temp_free_i32(c->u.s32.a);
895 }
896 }
897 if (!c->g2) {
898 if (c->is_64) {
899 tcg_temp_free_i64(c->u.s64.b);
900 } else {
901 tcg_temp_free_i32(c->u.s32.b);
902 }
903 }
904 }
905
/* ====================================================================== */
/* Define the insn format enumeration.  The F0..F5 macros reduce every
   entry of insn-format.def to just its FMT_<name> enumerator here; the
   same .def file is re-expanded below with different macros to build
   the field-layout table.  */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
925
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* One enumerator per distinct instruction field name.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact storage slot for each field; fields that never co-exist in a
   single format share a slot.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    unsigned op:8;              /* primary opcode byte */
    unsigned op2:8;             /* secondary opcode byte */
    unsigned presentC:16;       /* bitmap of occupied compact slots */
    unsigned int presentO;      /* bitmap over enum DisasFieldIndexO */
    int c[NUM_C_FIELD];         /* decoded field values, compact-indexed */
};
993
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F) have_field1((S), FLD_O_##F)
#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)

/* True if original-index field C was decoded for this instruction.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch the value of compact slot C; the original index O is used only
   to assert that the field was actually decoded.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1009
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field in the insn */
    unsigned int size:8;        /* width in bits */
    unsigned int type:2;        /* 0=unsigned, 1=signed, 2=long-disp (per I/BDL below) */
    unsigned int indexC:6;      /* compact storage slot */
    enum DisasFieldIndexO indexO:8; /* original field index */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

/* Field constructors: R=register, M=mask, BD=base+12-bit displacement,
   BXD adds an index register, BDL/BXDL use 20-bit displacements,
   I=signed immediate, L=length.  */
#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

/* Second expansion of insn-format.def: one field-layout row per format.  */
#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1063
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;   /* corresponding value is a TCG global */
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even 1
#define SPEC_r2_even 2
#define SPEC_r1_f128 4
#define SPEC_r2_f128 8

/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* Architecture facility an instruction belongs to; used to gate
   decoding on CPU model.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;

/* One decode-table entry: opcode, format, required facility, operand
   constraints, and the pipeline of helper callbacks used to translate
   the instruction.  */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:6;
    DisasFacility fac:6;
    unsigned spec:4;            /* SPEC_* bitmask */

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
1142
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

/* Compute the second operand for a shift-type operation into o->in2:
   d2 & MASK when there is no base register, otherwise (b2 + d2) & MASK.  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
1159
/* Emit an unconditional branch to DEST.  Falls through (NO_EXIT) when
   DEST is simply the next instruction; chains via goto_tb when allowed,
   otherwise just updates psw.addr and exits.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1176
/* Emit a conditional branch.  C describes the condition (consumed via
   free_compare on all paths).  If IS_IMM, the target is s->pc + 2*IMM
   (relative); otherwise it is the run-time value in CDEST.  The three
   main strategies are: both edges via goto_tb, fallthrough-only via
   goto_tb, or a movcond on psw_addr followed by a main-loop exit.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* 32-bit compare: widen the setcond result so a single 64-bit
               movcond against zero can select the new PSW address.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1301
1302 /* ====================================================================== */
1303 /* The operations. These perform the bulk of the work for any insn,
1304 usually after the operands have been loaded and output initialized. */
1305
/* Integer absolute value via helper.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Float absolute value: clear the sign bit of a 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Float absolute value: clear the sign bit of a 64-bit value.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* 128-bit float absolute value: clear the sign bit in the high half,
   copy the low half unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* 64-bit addition; CC handling is done by the insn's cout hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1336
/* Add with carry: out = in1 + in2 + carry, where carry is extracted
   from the current condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the flag in 32 bits, then widen.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
1363
/* BFP add, 32-bit operands, via helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP add, 64-bit operands, via helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP add, 128-bit operands; helper returns the low half via
   return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* 64-bit bitwise AND; CC handling is done by the insn's cout hook.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1388
/* AND immediate into a sub-field of in1.  insn->data packs the field's
   bit position (low byte) and width (high bits); bits outside the field
   are preserved by OR-ing ones into the mask operand.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* We are about to clobber in2, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1405
1406 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1407 {
1408 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1409 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1410 tcg_gen_mov_i64(psw_addr, o->in2);
1411 return EXIT_PC_UPDATED;
1412 } else {
1413 return NO_EXIT;
1414 }
1415 }
1416
/* Branch relative and save: store link information, then branch to the
   halfword-scaled immediate offset.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1422
/* Branch on condition, mask m1; target is either the relative immediate
   i2 or the address computed into in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1433
/* Branch on count, 32-bit: decrement the low half of r1 and branch if
   the 32-bit result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits, write back only the low 32 bits.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1457
/* Branch on count, 64-bit: decrement r1 in place and branch if the
   result is non-zero.  The register is a global, hence g1 = true.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1476
/* Branch on index, 32-bit (BXH/BXLE family): r1 += r3, compare the
   32-bit sum against the comparand in r3|1.  insn->data selects
   branch-on-low-or-equal (LE) vs branch-on-high (GT).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    /* Read the comparand before storing back to r1, in case r1 == r3|1.  */
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1502
/* Branch on index, 64-bit.  When r1 aliases the comparand register
   (r3|1), snapshot the comparand into a temp before the add clobbers
   it; otherwise compare directly against the global register.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1528
/* Compare and branch (CRJ/CGRJ/CLRJ/... family): compare in1 with in2
   using the m3 condition mask, then branch.  insn->data selects the
   unsigned variant.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    /* The inputs are managed by the framework; do not free them here.  */
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Indirect form: compute the branch target from b4/d4.  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1554
/* BFP compare, 32-bit operands; helper produces the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* BFP compare, 64-bit operands.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* BFP compare, 128-bit operands (high/low halves in out/out2, in1/in2).  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1575
/* Convert-float-to-integer family.  Each variant passes the m3 rounding
   mode field to its helper and then derives the CC from the *source*
   float value (32-bit, 64-bit, or 128-bit as appropriate).  The cf*
   helpers produce signed results, cg* signed 64-bit results, clf*/clg*
   unsigned results (logical variants).  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1683
/* Convert-integer-to-float family.  m3 carries the rounding mode; the
   c*gb helpers take signed input, c*lgb unsigned (logical) input.  No
   CC is produced by these.  The 128-bit variants also fetch the low
   half of the result via return_low128.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1733
/* CHECKSUM: helper returns the number of bytes processed; r2/r2+1
   (address/length pair) are advanced by that amount afterward.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    /* The checksum accumulator comes back in the low-128 slot.  */
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1750
/* COMPARE LOGICAL (character): for power-of-two lengths up to 8 bytes,
   inline two loads and an unsigned compare; otherwise fall back to the
   byte-loop helper.  l1 holds length-1.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: use the helper, which sets the CC itself.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    /* Inline cases: CC from an unsigned 64-bit comparison.  */
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1784
/* COMPARE LOGICAL LONG EXTENDED via helper; r1 and r3 name the
   register pairs, passed as constants so the helper can update them.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
1796
/* COMPARE LOGICAL UNDER MASK: the low 32 bits of in1 are compared
   byte-wise against storage at in2 under the m3 mask, via helper.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1809
/* COMPARE LOGICAL STRING via helper; r0 supplies the terminator byte.
   Updated addresses come back in in1 and the low-128 slot.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1818
1819 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1820 {
1821 TCGv_i64 t = tcg_temp_new_i64();
1822 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1823 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1824 tcg_gen_or_i64(o->out, o->out, t);
1825 tcg_temp_free_i64(t);
1826 return NO_EXIT;
1827 }
1828
/* COMPARE AND SWAP, 32-bit, via helper; r3 is the replacement value.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE AND SWAP, 64-bit, via helper.  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
1846
1847 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged), via helper.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
1857 #endif
1858
/* COMPARE DOUBLE AND SWAP (32-bit pair): build the 64-bit replacement
   from the r3/r3+1 register pair and reuse the 64-bit CSG helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    /* r3 supplies the high word, r3+1 the low word.  */
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}
1870
/* COMPARE DOUBLE AND SWAP (64-bit pair), via helper.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
1881
/* CONVERT TO DECIMAL: convert the low 32 bits of in1 to packed decimal
   via helper, then store the 8-byte result at address in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1893
/* COMPARE AND TRAP: if the m3 condition holds for in1 vs in2, raise a
   data exception with DXC 0xff; otherwise fall through.  The branch
   condition is inverted so the no-trap path skips the trap code.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap.  */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1920
1921 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged), via helper; the function code comes from the
   d2 displacement field.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
1935 #endif
1936
/* Divide family.  The integer helpers return the quotient through the
   low-128 slot and the remainder in out2; the BFP variants are plain
   helper calls.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 128/64 unsigned divide: the dividend is the out/out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* BFP divide, 32-bit operands.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP divide, 64-bit operands.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP divide, 128-bit operands; low half via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1983
/* EXTRACT ACCESS REGISTER: zero-extend access register r2 into out.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT FPC: zero-extend the FP control register into out.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
1996
/* EXECUTE: run the target instruction (with its second byte OR'ed from
   in1) via helper.  PSW address and CC state must be in sync before
   the call since the helper may fault or dispatch arbitrarily.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2022
/* FIND LEFTMOST ONE: out = leading-zero count (64 if input is zero),
   out2 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2042
2043 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2044 {
2045 int m3 = get_field(s->fields, m3);
2046 int pos, len, base = s->insn->data;
2047 TCGv_i64 tmp = tcg_temp_new_i64();
2048 uint64_t ccm;
2049
2050 switch (m3) {
2051 case 0xf:
2052 /* Effectively a 32-bit load. */
2053 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2054 len = 32;
2055 goto one_insert;
2056
2057 case 0xc:
2058 case 0x6:
2059 case 0x3:
2060 /* Effectively a 16-bit load. */
2061 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2062 len = 16;
2063 goto one_insert;
2064
2065 case 0x8:
2066 case 0x4:
2067 case 0x2:
2068 case 0x1:
2069 /* Effectively an 8-bit load. */
2070 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2071 len = 8;
2072 goto one_insert;
2073
2074 one_insert:
2075 pos = base + ctz32(m3) * 8;
2076 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2077 ccm = ((1ull << len) - 1) << pos;
2078 break;
2079
2080 default:
2081 /* This is going to be a sequence of loads and inserts. */
2082 pos = base + 32 - 8;
2083 ccm = 0;
2084 while (m3) {
2085 if (m3 & 0x8) {
2086 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2087 tcg_gen_addi_i64(o->in2, o->in2, 1);
2088 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2089 ccm |= 0xff << pos;
2090 }
2091 m3 = (m3 << 1) & 0xf;
2092 pos -= 8;
2093 }
2094 break;
2095 }
2096
2097 tcg_gen_movi_i64(tmp, ccm);
2098 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2099 tcg_temp_free_i64(tmp);
2100 return NO_EXIT;
2101 }
2102
/* Insert immediate: deposit in2 into in1 at the bit position/size
   packed in insn->data (low byte = shift, rest = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2110
/* INSERT PROGRAM MASK: build bits 24..31 of out from the program mask
   (PSW bits) and the condition code, preserving the rest of out.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program mask from the PSW into bits 24..27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Place the condition code in bits 28..29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2129
2130 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged), via helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED (privileged), via helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2144 #endif
2145
/* BFP load-and-convert family: lengthen/round between the 32-, 64-,
   and 128-bit formats via helpers.  The 128-bit results also fetch
   the low half via return_low128.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2183
/* LOAD LOGICAL THIRTY ONE BITS: mask to the low 31 bits.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2189
/* Memory load family: load from address in2 with the indicated width
   and sign treatment into out.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2231
/* LOAD ON CONDITION: out = (m3 condition holds) ? in2 : in1, emitted
   branch-free with movcond.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit condition: widen the setcond result and select with a
           64-bit movcond against zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2261
2262 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit), privileged, via helper.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit), privileged, via helper.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS, privileged, via helper; CC from the helper.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2294
/* LOAD PSW (short, 32-bit halves), privileged: read mask and address
   words from storage and install them via the load_psw helper.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    /* Installing a new PSW never returns to the fallthrough path.  */
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED (two 64-bit words), privileged.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2330 #endif
2331
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory;
   the register loop is in the helper.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2342
2343 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2344 {
2345 int r1 = get_field(s->fields, r1);
2346 int r3 = get_field(s->fields, r3);
2347 TCGv_i64 t = tcg_temp_new_i64();
2348 TCGv_i64 t4 = tcg_const_i64(4);
2349
2350 while (1) {
2351 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2352 store_reg32_i64(r1, t);
2353 if (r1 == r3) {
2354 break;
2355 }
2356 tcg_gen_add_i64(o->in2, o->in2, t4);
2357 r1 = (r1 + 1) & 15;
2358 }
2359
2360 tcg_temp_free_i64(t);
2361 tcg_temp_free_i64(t4);
2362 return NO_EXIT;
2363 }
2364
/* LOAD MULTIPLE HIGH: like LM, but the loaded words go into the high
   halves of registers r1..r3.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        /* Advance the address by 4; register numbers wrap at 15.  */
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2386
2387 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2388 {
2389 int r1 = get_field(s->fields, r1);
2390 int r3 = get_field(s->fields, r3);
2391 TCGv_i64 t8 = tcg_const_i64(8);
2392
2393 while (1) {
2394 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2395 if (r1 == r3) {
2396 break;
2397 }
2398 tcg_gen_add_i64(o->in2, o->in2, t8);
2399 r1 = (r1 + 1) & 15;
2400 }
2401
2402 tcg_temp_free_i64(t8);
2403 return NO_EXIT;
2404 }
2405
/* Generic register move: steal in2 as the output rather than copying,
   transferring the "is a global" flag along with it.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    /* Clear in2 so the generic cleanup does not free it twice.  */
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* 128-bit move: steal the in1/in2 pair as the out/out2 pair, same
   ownership-transfer scheme as op_mov2.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2426
/* MOVE (MVC): memory-to-memory copy of l1+1 bytes, done in the helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG (MVCL): helper operates on the register pairs directly
   and produces the condition code.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED (MVCLE): as MVCL, with the extra operand address.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2459
2460 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY (MVCP): privileged cross-address-space move; note the
   length is taken from register l1, not an immediate.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY (MVCS): mirror image of MVCP.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2480 #endif
2481
/* MOVE PAGE: helper consumes r0 for the option bits and sets the cc.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING (MVST): r0 holds the terminator byte; the helper returns
   the updated first-operand address in o->in1 and the second in the
   low 128 return slot.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2498
/* MULTIPLY (low 64 bits of the product).  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY producing a full 128-bit product; high part in out,
   low part returned via return_low128.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP).  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP operands, long BFP result).  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP).  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (extended BFP): 128-bit operands and result.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP operands, extended BFP result).  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2543
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3].  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (long BFP): fregs[r3] is used directly, no temp.  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (short BFP).  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (long BFP).  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2573
/* LOAD NEGATIVE (integer): out = -|in2|.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD NEGATIVE (short BFP): force the sign bit of the 32-bit value.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (long BFP): force the sign bit of the 64-bit value.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (extended BFP): sign bit lives in the high half;
   the low half is copied unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2598
/* AND (NC): storage-to-storage AND of l1+1 bytes via helper; cc from
   the helper result.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (integer).  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (short BFP): flip the 32-bit sign bit.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (long BFP): flip the 64-bit sign bit.  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (extended BFP): flip the sign in the high half,
   pass the low half through.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2633
/* OR (OC): storage-to-storage OR of l1+1 bytes via helper.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* OR (register/immediate forms): plain 64-bit OR.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2649
/* OR IMMEDIATE (OIHH etc.): insn->data encodes the field size in the
   high byte and the shift in the low byte.  The immediate is shifted
   into position and ORed in; the cc reflects only the affected field.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is clobbered below, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2665
/* POPULATION COUNT: done in a helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
2671
2672 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; flush handled by the helper.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2679 #endif
2680
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate R2 by
   i5, then insert the bit range i3..i4 into R1.  The high/low variants
   restrict the operation to one 32-bit half via PMASK.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;    /* Z bit: zero the untouched bits of R1.  */
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        /* i3 > i4 selects a wrapped range: both edges of pmask.  */
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            /* High-word variant: shift the field indices into the
               upper half for the 64-bit deposit below.  */
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        /* No deposit possible; fall back to the mask/or sequence.  */
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
2763
/* ROTATE THEN {AND,OR,XOR} SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate
   R2 by i5 and combine the bit range i3..i4 into R1; cc is set from
   the selected bits of the result.  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        /* Wrapped range: both ends of the register.  */
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        /* Bits outside the mask must not clear R1, so force them to 1.  */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2816
/* Byte-swap the low 16 bits.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap the low 32 bits.  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap the full 64 bits.  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2834
2835 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2836 {
2837 TCGv_i32 t1 = tcg_temp_new_i32();
2838 TCGv_i32 t2 = tcg_temp_new_i32();
2839 TCGv_i32 to = tcg_temp_new_i32();
2840 tcg_gen_trunc_i64_i32(t1, o->in1);
2841 tcg_gen_trunc_i64_i32(t2, o->in2);
2842 tcg_gen_rotl_i32(to, t1, t2);
2843 tcg_gen_extu_i32_i64(o->out, to);
2844 tcg_temp_free_i32(t1);
2845 tcg_temp_free_i32(t2);
2846 tcg_temp_free_i32(to);
2847 return NO_EXIT;
2848 }
2849
/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2855
2856 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; cc comes from the helper.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL (FAST): privileged.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
2872 #endif
2873
/* SET ACCESS REGISTER: store the low 32 bits of in2 into aregs[r1].  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}

/* SUBTRACT (short BFP).  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (long BFP).  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (extended BFP): 128-bit operands, low half via return_low128.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT (short BFP).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (long BFP).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (extended BFP).  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2918
2919 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; cc from the helper.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged inter-CPU signalling via helper.
   NOTE(review): cc_op is written by the helper but set_cc_static is
   not called here -- confirm the cc is handled by the caller.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2938 #endif
2939
/* STORE ON CONDITION (STOC/STOCG): branch around the store when the
   condition in m3 is false.  insn->data selects 64- vs 32-bit store.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    int lab, r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* The branch is taken when the condition does NOT hold, skipping
       the store.  */
    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    /* The address is computed here, after the branch, so that a
       not-taken store performs no address arithmetic side effects.  */
    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
2968
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit index
   (31 or 63), which also selects the cc computation.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3000
/* SET FPC: install a new floating-point control register via helper.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: like SFPC but may raise simulated exceptions.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3012
3013 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3014 {
3015 int b2 = get_field(s->fields, b2);
3016 int d2 = get_field(s->fields, d2);
3017 TCGv_i64 t1 = tcg_temp_new_i64();
3018 TCGv_i64 t2 = tcg_temp_new_i64();
3019 int mask, pos, len;
3020
3021 switch (s->fields->op2) {
3022 case 0x99: /* SRNM */
3023 pos = 0, len = 2;
3024 break;
3025 case 0xb8: /* SRNMB */
3026 pos = 0, len = 3;
3027 break;
3028 case 0xb9: /* SRNMT */
3029 pos = 4, len = 3;
3030 default:
3031 tcg_abort();
3032 }
3033 mask = (1 << len) - 1;
3034
3035 /* Insert the value into the appropriate field of the FPC. */
3036 if (b2 == 0) {
3037 tcg_gen_movi_i64(t1, d2 & mask);
3038 } else {
3039 tcg_gen_addi_i64(t1, regs[b2], d2);
3040 tcg_gen_andi_i64(t1, t1, mask);
3041 }
3042 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3043 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3044 tcg_temp_free_i64(t1);
3045
3046 /* Then install the new FPC to set the rounding mode in fpu_status. */
3047 gen_helper_sfpc(cpu_env, t2);
3048 tcg_temp_free_i64(t2);
3049 return NO_EXIT;
3050 }
3051
3052 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; deposit bits 60-63 of the
   address into the PSW key field.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged; helper updates the key.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace PSW bits 0-7 from in2.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CPU ADDRESS: privileged.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3084
/* STORE CLOCK: 64-bit TOD value from the helper; cc forced to 0.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: 16-byte result built from the 64-bit clock.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3114
/* SET CLOCK COMPARATOR: privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (STCTG, 64-bit): privileged; loop in the helper.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (STCTL, 32-bit): privileged; loop in the helper.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3152
/* STORE CPU ID: privileged; cpu_num stands in for the full CPU id.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* SET CPU TIMER: privileged.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST: store a fixed facility word to absolute 200.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}

/* STORE CPU TIMER: privileged.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
3187
/* STORE SYSTEM INFORMATION: privileged; r0/r1 carry function codes.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX: privileged.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* Channel-subsystem instructions: report "not operational" (cc 3).  */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational. */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}

/* STORE PREFIX: privileged; mask to the architected prefix bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
3219
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac, STOSM): store the
   current system mask byte, then AND or OR the immediate into it.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into bits 0-7 of the PSW mask.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into bits 0-7.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3243
/* STORE USING REAL ADDRESS: privileged; translation bypass in helper.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3251 #endif
3252
/* Store the low byte of in1 at in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low halfword of in1 at in2.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low word of in1 at in2.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the full doubleword of in1 at in2.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3276
/* STORE ACCESS MULTIPLE: store access registers r1..r3 via helper.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3287
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by m3.
   insn->data gives the bit position of the register field's msb, so
   the high (STCMH) and low variants share this code.  Contiguous
   masks collapse to a single store; otherwise store byte by byte.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Shift amount that brings the lowest selected byte to bits 0-7.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            /* Walk the mask msb-first; pos tracks the matching byte.  */
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3336
3337 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3338 {
3339 int r1 = get_field(s->fields, r1);
3340 int r3 = get_field(s->fields, r3);
3341 int size = s->insn->data;
3342 TCGv_i64 tsize = tcg_const_i64(size);
3343
3344 while (1) {
3345 if (size == 8) {
3346 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3347 } else {
3348 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3349 }
3350 if (r1 == r3) {
3351 break;
3352 }
3353 tcg_gen_add_i64(o->in2, o->in2, tsize);
3354 r1 = (r1 + 1) & 15;
3355 }
3356
3357 tcg_temp_free_i64(tsize);
3358 return NO_EXIT;
3359 }
3360
/* STORE MULTIPLE HIGH: store the high word of each register r1..r3.
   The shift by the constant 32 brings the high half into store range.  */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* NOTE(review): shl by 32 then st32 stores the low half of the
           shifted value -- qemu_st32 stores bits 0-31, so this appears
           to store the LOW word; confirm against STMH semantics.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
3384
/* SEARCH STRING: r0 holds the search byte; the helper returns the
   updated addresses (first in o->in1, second via return_low128).  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3393
/* SUBTRACT (integer).  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where the borrow is
   recovered from the current condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the setcond in 32 bits and widen.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
3425
/* SUPERVISOR CALL: record the SVC code and instruction length for the
   interrupt handler, then raise the exception.  Ends the TB.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The exception unwinds through the PSW, so it must be current.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3444
/* TEST DATA CLASS (short BFP): cc from helper.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (long BFP).  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (extended BFP): 128-bit operand in out/out2.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3465
3466 #ifndef CONFIG_USER_ONLY
3467 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3468 {
3469 potential_page_fault(s);
3470 gen_helper_tprot(cc_op, o->addr1, o->in2);
3471 set_cc_static(s);
3472 return NO_EXIT;
3473 }
3474 #endif
3475
/* TRANSLATE: table-driven byte translation of l1+1 bytes via helper.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK: expand packed-decimal digits to zoned format via helper.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3494
/* EXCLUSIVE OR (character): XOR a memory region with another region.
   The common idiom XC mem,mem (identical operands) zeroes the region;
   inline that as direct stores when it is short enough, otherwise fall
   back to the helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);   /* length - 1, per the ISA */
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.
       Only inline up to 32 bytes; longer clears go through the helper
       (which also computes a zero cc for the identical-operand case).  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* Convert from length-1 encoding to a byte count, then emit the
           largest aligned-size stores that fit, advancing addr1 as we go.
           The final addi is skipped when nothing remains to store.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* The result of x XOR x is zero, so cc is always 0 here.  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
3548
/* Generic 64-bit XOR; cc handling is supplied by the cout_* entry.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3554
/* XOR IMMEDIATE: the insn table's data field encodes (size << 8) | shift,
   selecting which aligned subfield of the register the immediate hits.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is a non-global temp holding the immediate; shift it in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3570
/* Produce a constant-zero result (single register).  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a constant-zero result in both output halves.  out2 aliases
   out, so mark it "global" to prevent a double free in translate_one.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3584
3585 /* ====================================================================== */
3586 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3587 the original inputs), update the various cc data structures in order to
3588 be able to compute the new condition code. */
3589
/* Each cout_* helper saves the operands of the just-emitted operation
   together with a CC_OP_* method; the actual condition code is computed
   lazily, only when something consumes it.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Truncate first so CC_OP_NZ only sees the low 32 bits.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3745
3746 /* ====================================================================== */
3747 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3748 with the TCG register to which we will write. Used in combination with
3749 the "wout" generators, in some cases we need a new temporary, and in
3750 some cases we can write to a TCG global. */
3751
/* Allocate a fresh temporary for the single result.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate fresh temporaries for a pair (128-bit) result.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into the r1 global; g_out prevents freeing it.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write into the even/odd register pair r1:r1+1 (r1 must be even).  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into the f1 floating-point global.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write a 128-bit FP result into the register pair f1:f1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3796
3797 /* ====================================================================== */
3798 /* The "Write OUTput" generators. These generally perform some non-trivial
3799 copy of data to TCG globals, or to main memory. The trivial cases are
3800 generally handled by having a "prep" generator install the TCG global
3801 as the destination of the operation. */
3802
/* Store the full 64-bit result into r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the low 32 bits of the result into r1's low half.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store a 32-bit pair result into r1 and r1+1 (r1 must be even).  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split one 64-bit result across the pair: low half to r1+1, high half
   to r1.  Note this clobbers o->out with the shifted value.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store a short (32-bit) FP result into f1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a long (64-bit) FP result into f1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
3857
3858 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3859 {
3860 int f1 = get_field(s->fields, r1);
3861 store_freg(f1, o->out);
3862 store_freg(f1 + 2, o->out2);
3863 }
3864 #define SPEC_wout_x1 SPEC_r1_f128
3865
/* Store the 32-bit result to r1 only when r1 and r2 differ (used by
   load-and-test style insns where r1 == r2 means "test only").  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Same conditional store, for short FP registers.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the result to memory at addr1, in various widths.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store 32 bits to the address computed into in2 (second operand).  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0
3911
3912 /* ====================================================================== */
3913 /* The "INput 1" generators. These load the first operand to an insn. */
3914
/* Load r1 into a fresh temporary.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Alias r1 directly ("_o" = original/global, must not be freed).  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Low 32 bits of r1, sign-extended.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Low 32 bits of r1, zero-extended.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd register of the pair, r1+1 (r1 must be even).  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Concatenate r1 (high) and r1+1 (low) 32-bit halves into 64 bits.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* Short (32-bit) FP operand from f1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* Long (64-bit) FP operand, aliased to the f1 global.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* NOTE(review): this fills out/out2, not in1/in2 -- apparently the
       128-bit FP operand is always carried via the out pair (compare
       op_tcxb); confirm against the insns that use in1=x1.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0
4038
/* Compute the first-operand effective address (base b1 + disp d1).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Compute the second-operand address (x2 + b2 + d2) into addr1.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Load the first operand from memory in various widths/extensions.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4099
4100 /* ====================================================================== */
4101 /* The "INput 2" generators. These load the second operand to an insn. */
4102
/* Alias r1 directly as the second operand (global, not freed).  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Low 16 bits of r1, zero-extended.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Low 32 bits of r1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when nonzero; r2 == 0 leaves in2 unset (callers treat
   register 0 as "no operand" for these insns).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Various widths/extensions of r2.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* Short (32-bit) FP operand from f2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Long (64-bit) FP operand, aliased to the f2 global.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit FP operand: both halves of the f2:f2+2 pair go into
   in1/in2 (r2 restricted to valid f128 register numbers).  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128
4215
/* Register-as-address: the value of r2, masked per addressing mode.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Effective address x2 + b2 + d2 (x2 only for formats that have it).  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: pc + 2 * signed I2 (halfword offset).  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift amounts, masked to 5 or 6 bits by help_l2_shift.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
4246
/* Load the second operand from memory at the a2 effective address,
   in various widths/extensions.  The address temp in in2 is reused
   in place as the destination of the load.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* Same, but at the pc-relative (ri2) address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
4316
/* Immediate second operand, as extracted (already sign-extended by
   extract_field when the field is signed).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* Immediate truncated/zero-extended to 8, 16 or 32 bits.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* Unsigned immediate shifted left by the insn table's data field
   (e.g. for the "insert immediate into halfword N" insns).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4354
4355 /* ====================================================================== */
4356
4357 /* Find opc within the table of insns. This is formulated as a switch
4358 statement so that (1) we get compile-time notice of cut-paste errors
4359 for duplicated opcodes, and (2) the compiler generates the binary
4360 search tree, rather than us having to post-process the table. */
4361
/* C() is D() with a zero data field.  insn-data.def is included three
   times with different definitions of D: first to build the enum of
   insn indexes, then the info table, then (below) the lookup switch.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {       \
    .opc = OPC,                                             \
    .fmt = FMT_##FT,                                        \
    .fac = FAC_##FC,                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                            \
    .help_in1 = in1_##I1,                                   \
    .help_in2 = in2_##I2,                                   \
    .help_prep = prep_##P,                                  \
    .help_wout = wout_##W,                                  \
    .help_cout = cout_##CC,                                 \
    .help_op = op_##OP,                                     \
    .data = D                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
4403
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a (major << 8 | minor) opcode to its DisasInsn entry, or NULL if
   unimplemented.  The switch is generated from insn-data.def so that
   duplicated opcodes are compile-time errors and the compiler builds
   the binary search tree for us.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4419
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation. */
4423
/* Extract one operand field described by F from the left-aligned INSN
   and store it into the compressed slot o->c[f->indexC].  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* Size 0 marks an unused field descriptor.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Classic sign-extension trick: xor with the sign bit mask,
           then subtract it.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* Reassemble: dh (low 8 bits extracted) is the signed high part,
           dl (upper 12 bits extracted) the unsigned low part.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4459
4460 /* Lookup the insn at the current PC, extracting the operands into O and
4461 returning the info struct for the insn. Returns NULL for invalid insn. */
4462
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for invalid insn.
   Also sets s->next_pc from the decoded instruction length.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes are enough to determine the length:
       the top two bits of the major opcode encode 2/4/6 bytes.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the uint64_t, re-reading memory as
       needed for the longer formats.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4548
/* Translate a single guest instruction: decode it, check specification
   exceptions, run the in/prep/op/wout/cout generator pipeline, free the
   temporaries, and advance s->pc.  Returns the insn's exit status.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions: operand constraints such
       as "r1 must be even" or "r1 must name a valid f128 pair", encoded
       as SPEC_* bits in the insn table.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; a NULL helper
       in the table means "nothing to do" for that stage.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       values that alias TCG globals and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4652
/* Translate a basic block of guest code starting at tb->pc into TCG ops.
   If search_pc is set, also record per-op guest PC / cc_op / icount data
   in tcg_ctx.gen_opc_* so that a guest PC can later be recovered from a
   host PC (see restore_state_to_opc).  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode: addresses are truncated to 31 bits.  */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Translation must stop at the end of the guest page.  */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record the guest state corresponding to the next TCG op,
               zero-filling any ops emitted since the last insn start.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint on this insn ends the TB before translating it;
           the debug exception is raised when the TB is exited below.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The TB already ends with a jump/exception; nothing to emit.  */
        break;
    case EXIT_PC_STALE:
        /* psw.addr in env is out of date; write back dc.pc first.  */
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return. */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the instr_start slots for the trailing ops.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4786
4787 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4788 {
4789 gen_intermediate_code_internal(env, tb, 0);
4790 }
4791
4792 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4793 {
4794 gen_intermediate_code_internal(env, tb, 1);
4795 }
4796
4797 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4798 {
4799 int cc_op;
4800 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4801 cc_op = gen_opc_cc_op[pc_pos];
4802 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4803 env->cc_op = cc_op;
4804 }
4805 }