/* Source: QEMU (mirror_qemu.git, via git.proxmox.com), file target-s390x/translate.c,
   snapshot at commit "target-s390x: optimize (negative-) abs computation". */
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
37
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
40
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44
45 #include "trace-tcg.h"
46
47
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
52
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t pc, next_pc;
58 enum cc_op cc_op;
59 bool singlestep_enabled;
60 };
61
62 /* Information carried about a condition to be evaluated. */
63 typedef struct {
64 TCGCond cond:8;
65 bool is_64;
66 bool g1;
67 bool g2;
68 union {
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
71 } u;
72 } DisasCompare;
73
74 #define DISAS_EXCP 4
75
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
79 #endif
80
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
82 {
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
86 }
87 }
88 return pc;
89 }
90
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
92 int flags)
93 {
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
96 int i;
97
98 if (env->cc_op > 3) {
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
101 } else {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
104 }
105
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
108 if ((i % 4) == 3) {
109 cpu_fprintf(f, "\n");
110 } else {
111 cpu_fprintf(f, " ");
112 }
113 }
114
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
117 if ((i % 4) == 3) {
118 cpu_fprintf(f, "\n");
119 } else {
120 cpu_fprintf(f, " ");
121 }
122 }
123
124 for (i = 0; i < 32; i++) {
125 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
126 env->vregs[i][0].ll, env->vregs[i][1].ll);
127 cpu_fprintf(f, (i % 2) ? " " : "\n");
128 }
129
130 #ifndef CONFIG_USER_ONLY
131 for (i = 0; i < 16; i++) {
132 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
133 if ((i % 4) == 3) {
134 cpu_fprintf(f, "\n");
135 } else {
136 cpu_fprintf(f, " ");
137 }
138 }
139 #endif
140
141 #ifdef DEBUG_INLINE_BRANCHES
142 for (i = 0; i < CC_OP_MAX; i++) {
143 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
144 inline_branch_miss[i], inline_branch_hit[i]);
145 }
146 #endif
147
148 cpu_fprintf(f, "\n");
149 }
150
/* TCG global variables mapping fields of CPUS390XState; all are
   registered once in s390x_translate_init(). */
151 static TCGv_i64 psw_addr;
152 static TCGv_i64 psw_mask;
153
/* Condition-code machinery: cc_op names how to compute the cc;
   cc_src/cc_dst/cc_vr carry its operands (see gen_op_calc_cc). */
154 static TCGv_i32 cc_op;
155 static TCGv_i64 cc_src;
156 static TCGv_i64 cc_dst;
157 static TCGv_i64 cc_vr;
158
/* Register-name strings (r0..r15, f0..f15) and the TCG handles for the
   16 general and 16 floating point registers. */
159 static char cpu_reg_names[32][4];
160 static TCGv_i64 regs[16];
161 static TCGv_i64 fregs[16];
162
/* Per-opcode snapshot of cc_op; presumably consumed when restoring CPU
   state after an exception mid-TB — TODO confirm against cpu-exec users. */
163 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
164
/* One-time creation of all TCG globals above.  Registration order fixes
   the internal TCG global indices, so do not reorder these calls. */
165 void s390x_translate_init(void)
166 {
167 int i;
168
169 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
170 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
171 offsetof(CPUS390XState, psw.addr),
172 "psw_addr");
173 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
174 offsetof(CPUS390XState, psw.mask),
175 "psw_mask");
176
177 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
178 "cc_op");
179 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
180 "cc_src");
181 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
182 "cc_dst");
183 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
184 "cc_vr");
185
/* General registers r0..r15. */
186 for (i = 0; i < 16; i++) {
187 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
188 regs[i] = tcg_global_mem_new(TCG_AREG0,
189 offsetof(CPUS390XState, regs[i]),
190 cpu_reg_names[i]);
191 }
192
/* Floating point registers f0..f15, overlaid on the first doubleword
   of the corresponding vector register. */
193 for (i = 0; i < 16; i++) {
194 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
195 fregs[i] = tcg_global_mem_new(TCG_AREG0,
196 offsetof(CPUS390XState, vregs[i][0].d),
197 cpu_reg_names[i + 16]);
198 }
199 }
200
/* Return a fresh temp holding the full 64-bit value of GPR 'reg'. */
201 static TCGv_i64 load_reg(int reg)
202 {
203 TCGv_i64 r = tcg_temp_new_i64();
204 tcg_gen_mov_i64(r, regs[reg]);
205 return r;
206 }
207
/* Return a fresh temp with the short-float value of FPR 'reg'; the
   32-bit value lives in the high half of the 64-bit register. */
208 static TCGv_i64 load_freg32_i64(int reg)
209 {
210 TCGv_i64 r = tcg_temp_new_i64();
211 tcg_gen_shri_i64(r, fregs[reg], 32);
212 return r;
213 }
214
/* Store all 64 bits of 'v' into GPR 'reg'. */
215 static void store_reg(int reg, TCGv_i64 v)
216 {
217 tcg_gen_mov_i64(regs[reg], v);
218 }
219
/* Store all 64 bits of 'v' into FPR 'reg'. */
220 static void store_freg(int reg, TCGv_i64 v)
221 {
222 tcg_gen_mov_i64(fregs[reg], v);
223 }
224
225 static void store_reg32_i64(int reg, TCGv_i64 v)
226 {
227 /* 32 bit register writes keep the upper half */
228 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
229 }
230
/* Write the low 32 bits of 'v' into the HIGH half of GPR 'reg',
   preserving the low half (high-word register forms). */
231 static void store_reg32h_i64(int reg, TCGv_i64 v)
232 {
233 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
234 }
235
/* Write a short-float value into the high half of FPR 'reg'. */
236 static void store_freg32_i64(int reg, TCGv_i64 v)
237 {
238 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
239 }
240
/* Fetch the low 64 bits of a helper's 128-bit result, which helpers
   leave behind in env->retxl. */
241 static void return_low128(TCGv_i64 dest)
242 {
243 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
244 }
245
/* Flush the translation-time PC into the architectural psw.addr. */
246 static void update_psw_addr(DisasContext *s)
247 {
248 /* psw.addr */
249 tcg_gen_movi_i64(psw_addr, s->pc);
250 }
251
/* Flush the tracked cc_op into the cc_op global.  Skipped for STATIC
   (cc value already in env->cc_op) and DYNAMIC (op already in env). */
252 static void update_cc_op(DisasContext *s)
253 {
254 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
255 tcg_gen_movi_i32(cc_op, s->cc_op);
256 }
257 }
258
/* Called before operations that may fault, so the exception path sees
   a consistent psw.addr and cc state. */
259 static void potential_page_fault(DisasContext *s)
260 {
261 update_psw_addr(s);
262 update_cc_op(s);
263 }
264
/* Fetch 2 bytes of instruction text at 'pc', zero-extended to 64 bits. */
265 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
266 {
267 return (uint64_t)cpu_lduw_code(env, pc);
268 }
269
/* Fetch 4 bytes of instruction text at 'pc', zero-extended to 64 bits. */
270 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
271 {
272 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
273 }
274
/* Map the PSW address-space-control bits (from tb->flags) to the MMU
   index used for memory accesses in this TB. */
275 static int get_mem_index(DisasContext *s)
276 {
277 switch (s->tb->flags & FLAG_MASK_ASC) {
278 case PSW_ASC_PRIMARY >> 32:
279 return 0;
280 case PSW_ASC_SECONDARY >> 32:
281 return 1;
282 case PSW_ASC_HOME >> 32:
283 return 2;
284 default:
285 tcg_abort();
286 break;
287 }
288 }
289
/* Emit a call raising exception 'excp'; the helper does not return. */
290 static void gen_exception(int excp)
291 {
292 TCGv_i32 tmp = tcg_const_i32(excp);
293 gen_helper_exception(cpu_env, tmp);
294 tcg_temp_free_i32(tmp);
295 }
296
/* Raise a program exception with interruption code 'code', recording
   the code and instruction length for the interrupt handler, and
   leaving psw.addr pointing past the faulting instruction. */
297 static void gen_program_exception(DisasContext *s, int code)
298 {
299 TCGv_i32 tmp;
300
301 /* Remember what pgm exception this was. */
302 tmp = tcg_const_i32(code);
303 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
304 tcg_temp_free_i32(tmp);
305
/* Instruction length in bytes, derived from the decode window. */
306 tmp = tcg_const_i32(s->next_pc - s->pc);
307 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
308 tcg_temp_free_i32(tmp);
309
310 /* Advance past instruction. */
311 s->pc = s->next_pc;
312 update_psw_addr(s);
313
314 /* Save off cc. */
315 update_cc_op(s);
316
317 /* Trigger exception. */
318 gen_exception(EXCP_PGM);
319 }
320
/* Convenience wrapper: specification exception for bad encodings. */
321 static inline void gen_illegal_opcode(DisasContext *s)
322 {
323 gen_program_exception(s, PGM_SPECIFICATION);
324 }
325
326 #ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception when translating in problem
   (user) state; system emulation only. */
327 static void check_privileged(DisasContext *s)
328 {
329 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
330 gen_program_exception(s, PGM_PRIVILEGED);
331 }
332 }
333 #endif
334
/* Compute the effective address base(b2) + index(x2) + displacement(d2)
   into a freshly allocated temp.  Register 0 means "no base/index".
   In 24/31-bit mode the result is masked down to 31 bits. */
335 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
336 {
337 TCGv_i64 tmp = tcg_temp_new_i64();
338 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
339
340 /* Note that d2 is limited to 20 bits, signed. If we crop negative
341 displacements early we create larger immediate addends. */
342
343 /* Note that addi optimizes the imm==0 case. */
344 if (b2 && x2) {
345 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
346 tcg_gen_addi_i64(tmp, tmp, d2);
347 } else if (b2) {
348 tcg_gen_addi_i64(tmp, regs[b2], d2);
349 } else if (x2) {
350 tcg_gen_addi_i64(tmp, regs[x2], d2);
351 } else {
/* Constant-only address: apply the 31-bit mask at translate time. */
352 if (need_31) {
353 d2 &= 0x7fffffff;
354 need_31 = false;
355 }
356 tcg_gen_movi_i64(tmp, d2);
357 }
358 if (need_31) {
359 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
360 }
361
362 return tmp;
363 }
364
/* True if cc_src/cc_dst/cc_vr currently carry data needed to compute
   the cc, i.e. cc_op is a real computation (> 3, past the CONST0..3
   range) and not STATIC/DYNAMIC. */
365 static inline bool live_cc_data(DisasContext *s)
366 {
367 return (s->cc_op != CC_OP_DYNAMIC
368 && s->cc_op != CC_OP_STATIC
369 && s->cc_op > 3);
370 }
371
/* Set the cc to the constant VAL (0..3); stale operand temps are
   discarded so TCG can drop dead computations. */
372 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
373 {
374 if (live_cc_data(s)) {
375 tcg_gen_discard_i64(cc_src);
376 tcg_gen_discard_i64(cc_dst);
377 tcg_gen_discard_i64(cc_vr);
378 }
379 s->cc_op = CC_OP_CONST0 + val;
380 }
381
/* Record a one-operand cc computation OP over DST. */
382 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
383 {
384 if (live_cc_data(s)) {
385 tcg_gen_discard_i64(cc_src);
386 tcg_gen_discard_i64(cc_vr);
387 }
388 tcg_gen_mov_i64(cc_dst, dst);
389 s->cc_op = op;
390 }
391
/* Record a two-operand cc computation OP over (SRC, DST). */
392 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
393 TCGv_i64 dst)
394 {
395 if (live_cc_data(s)) {
396 tcg_gen_discard_i64(cc_vr);
397 }
398 tcg_gen_mov_i64(cc_src, src);
399 tcg_gen_mov_i64(cc_dst, dst);
400 s->cc_op = op;
401 }
402
/* Record a three-operand cc computation OP over (SRC, DST, VR). */
403 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
404 TCGv_i64 dst, TCGv_i64 vr)
405 {
406 tcg_gen_mov_i64(cc_src, src);
407 tcg_gen_mov_i64(cc_dst, dst);
408 tcg_gen_mov_i64(cc_vr, vr);
409 s->cc_op = op;
410 }
411
/* cc from a 64-bit result's zero/non-zero state. */
412 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
413 {
414 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
415 }
416
/* cc from a 32-bit float result. */
417 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
418 {
419 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
420 }
421
/* cc from a 64-bit float result. */
422 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
423 {
424 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
425 }
426
/* cc from a 128-bit float result (high/low halves). */
427 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
428 {
429 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
430 }
431
432 /* CC value is in env->cc_op */
433 static void set_cc_static(DisasContext *s)
434 {
435 if (live_cc_data(s)) {
436 tcg_gen_discard_i64(cc_src);
437 tcg_gen_discard_i64(cc_dst);
438 tcg_gen_discard_i64(cc_vr);
439 }
440 s->cc_op = CC_OP_STATIC;
441 }
442
443 /* calculates cc into cc_op */
/* Materialize the deferred condition code: emit a call to the calc_cc
   helper (or a constant move) so that the cc_op global holds the actual
   0..3 cc value, then mark the state CC_OP_STATIC. */
444 static void gen_op_calc_cc(DisasContext *s)
445 {
446 TCGv_i32 local_cc_op;
447 TCGv_i64 dummy;
448
/* First pass: decide which extra temps the helper call will need. */
449 TCGV_UNUSED_I32(local_cc_op);
450 TCGV_UNUSED_I64(dummy);
451 switch (s->cc_op) {
452 default:
/* 1- and 2-argument ops pass 'dummy' for the unused slots. */
453 dummy = tcg_const_i64(0);
454 /* FALLTHRU */
455 case CC_OP_ADD_64:
456 case CC_OP_ADDU_64:
457 case CC_OP_ADDC_64:
458 case CC_OP_SUB_64:
459 case CC_OP_SUBU_64:
460 case CC_OP_SUBB_64:
461 case CC_OP_ADD_32:
462 case CC_OP_ADDU_32:
463 case CC_OP_ADDC_32:
464 case CC_OP_SUB_32:
465 case CC_OP_SUBU_32:
466 case CC_OP_SUBB_32:
467 local_cc_op = tcg_const_i32(s->cc_op);
468 break;
469 case CC_OP_CONST0:
470 case CC_OP_CONST1:
471 case CC_OP_CONST2:
472 case CC_OP_CONST3:
473 case CC_OP_STATIC:
474 case CC_OP_DYNAMIC:
/* These need neither a helper op argument nor a dummy. */
475 break;
476 }
477
/* Second pass: emit the actual computation. */
478 switch (s->cc_op) {
479 case CC_OP_CONST0:
480 case CC_OP_CONST1:
481 case CC_OP_CONST2:
482 case CC_OP_CONST3:
483 /* s->cc_op is the cc value */
484 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
485 break;
486 case CC_OP_STATIC:
487 /* env->cc_op already is the cc value */
488 break;
489 case CC_OP_NZ:
490 case CC_OP_ABS_64:
491 case CC_OP_NABS_64:
492 case CC_OP_ABS_32:
493 case CC_OP_NABS_32:
494 case CC_OP_LTGT0_32:
495 case CC_OP_LTGT0_64:
496 case CC_OP_COMP_32:
497 case CC_OP_COMP_64:
498 case CC_OP_NZ_F32:
499 case CC_OP_NZ_F64:
500 case CC_OP_FLOGR:
501 /* 1 argument */
502 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
503 break;
504 case CC_OP_ICM:
505 case CC_OP_LTGT_32:
506 case CC_OP_LTGT_64:
507 case CC_OP_LTUGTU_32:
508 case CC_OP_LTUGTU_64:
509 case CC_OP_TM_32:
510 case CC_OP_TM_64:
511 case CC_OP_SLA_32:
512 case CC_OP_SLA_64:
513 case CC_OP_NZ_F128:
514 /* 2 arguments */
515 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
516 break;
517 case CC_OP_ADD_64:
518 case CC_OP_ADDU_64:
519 case CC_OP_ADDC_64:
520 case CC_OP_SUB_64:
521 case CC_OP_SUBU_64:
522 case CC_OP_SUBB_64:
523 case CC_OP_ADD_32:
524 case CC_OP_ADDU_32:
525 case CC_OP_ADDC_32:
526 case CC_OP_SUB_32:
527 case CC_OP_SUBU_32:
528 case CC_OP_SUBB_32:
529 /* 3 arguments */
530 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
531 break;
532 case CC_OP_DYNAMIC:
533 /* unknown operation - assume 3 arguments and cc_op in env */
534 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
535 break;
536 default:
537 tcg_abort();
538 }
539
/* Release whatever temps the first pass allocated. */
540 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
541 tcg_temp_free_i32(local_cc_op);
542 }
543 if (!TCGV_IS_UNUSED_I64(dummy)) {
544 tcg_temp_free_i64(dummy);
545 }
546
547 /* We now have cc in cc_op as constant */
548 set_cc_static(s);
549 }
550
/* True if a direct goto_tb to DEST is allowed: destination must lie on
   one of the (up to two) pages the TB touches, and neither single-step
   nor a pending I/O boundary may be active. */
551 static int use_goto_tb(DisasContext *s, uint64_t dest)
552 {
553 /* NOTE: we handle the case where the TB spans two pages here */
554 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
555 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
556 && !s->singlestep_enabled
557 && !(s->tb->cflags & CF_LAST_IO));
558 }
559
/* Statistics bookkeeping; no-ops unless DEBUG_INLINE_BRANCHES is set. */
560 static void account_noninline_branch(DisasContext *s, int cc_op)
561 {
562 #ifdef DEBUG_INLINE_BRANCHES
563 inline_branch_miss[cc_op]++;
564 #endif
565 }
566
567 static void account_inline_branch(DisasContext *s, int cc_op)
568 {
569 #ifdef DEBUG_INLINE_BRANCHES
570 inline_branch_hit[cc_op]++;
571 #endif
572 }
573
574 /* Table of mask values to comparison codes, given a comparison as input.
575 For such, CC=3 should not be possible. */
/* Entries come in pairs because mask bit 0 selects cc==3, which is a
   don't-care here: mask and mask|1 map to the same condition. */
576 static const TCGCond ltgt_cond[16] = {
577 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
578 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
579 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
580 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
581 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
582 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
583 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
584 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
585 };
586
587 /* Table of mask values to comparison codes, given a logic op as input.
588 For such, only CC=0 and CC=1 should be possible. */
/* Here mask bits 0 and 1 (cc==3 and cc==2) are both don't-cares, so
   entries repeat in groups of four. */
589 static const TCGCond nz_cond[16] = {
590 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
591 TCG_COND_NEVER, TCG_COND_NEVER,
592 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
593 TCG_COND_NE, TCG_COND_NE,
594 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
595 TCG_COND_EQ, TCG_COND_EQ,
596 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
597 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
598 };
599
600 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
601 details required to generate a TCG comparison. */
/* Two phases: (1) map mask + cc_op to a TCG condition, falling back to
   materializing the cc (do_dynamic) when no direct mapping exists;
   (2) load the operands of that comparison into C.  g1/g2 mark operands
   that alias TCG globals and must not be freed by free_compare(). */
602 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
603 {
604 TCGCond cond;
605 enum cc_op old_cc_op = s->cc_op;
606
/* Trivial masks: branch always / branch never. */
607 if (mask == 15 || mask == 0) {
608 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
609 c->u.s32.a = cc_op;
610 c->u.s32.b = cc_op;
611 c->g1 = c->g2 = true;
612 c->is_64 = false;
613 return;
614 }
615
616 /* Find the TCG condition for the mask + cc op. */
617 switch (old_cc_op) {
618 case CC_OP_LTGT0_32:
619 case CC_OP_LTGT0_64:
620 case CC_OP_LTGT_32:
621 case CC_OP_LTGT_64:
622 cond = ltgt_cond[mask];
623 if (cond == TCG_COND_NEVER) {
624 goto do_dynamic;
625 }
626 account_inline_branch(s, old_cc_op);
627 break;
628
629 case CC_OP_LTUGTU_32:
630 case CC_OP_LTUGTU_64:
631 cond = tcg_unsigned_cond(ltgt_cond[mask]);
632 if (cond == TCG_COND_NEVER) {
633 goto do_dynamic;
634 }
635 account_inline_branch(s, old_cc_op);
636 break;
637
638 case CC_OP_NZ:
639 cond = nz_cond[mask];
640 if (cond == TCG_COND_NEVER) {
641 goto do_dynamic;
642 }
643 account_inline_branch(s, old_cc_op);
644 break;
645
646 case CC_OP_TM_32:
647 case CC_OP_TM_64:
648 switch (mask) {
649 case 8:
650 cond = TCG_COND_EQ;
651 break;
652 case 4 | 2 | 1:
653 cond = TCG_COND_NE;
654 break;
655 default:
656 goto do_dynamic;
657 }
658 account_inline_branch(s, old_cc_op);
659 break;
660
661 case CC_OP_ICM:
662 switch (mask) {
663 case 8:
664 cond = TCG_COND_EQ;
665 break;
666 case 4 | 2 | 1:
667 case 4 | 2:
668 cond = TCG_COND_NE;
669 break;
670 default:
671 goto do_dynamic;
672 }
673 account_inline_branch(s, old_cc_op);
674 break;
675
676 case CC_OP_FLOGR:
677 switch (mask & 0xa) {
678 case 8: /* src == 0 -> no one bit found */
679 cond = TCG_COND_EQ;
680 break;
681 case 2: /* src != 0 -> one bit found */
682 cond = TCG_COND_NE;
683 break;
684 default:
685 goto do_dynamic;
686 }
687 account_inline_branch(s, old_cc_op);
688 break;
689
690 case CC_OP_ADDU_32:
691 case CC_OP_ADDU_64:
692 switch (mask) {
693 case 8 | 2: /* vr == 0 */
694 cond = TCG_COND_EQ;
695 break;
696 case 4 | 1: /* vr != 0 */
697 cond = TCG_COND_NE;
698 break;
699 case 8 | 4: /* no carry -> vr >= src */
700 cond = TCG_COND_GEU;
701 break;
702 case 2 | 1: /* carry -> vr < src */
703 cond = TCG_COND_LTU;
704 break;
705 default:
706 goto do_dynamic;
707 }
708 account_inline_branch(s, old_cc_op);
709 break;
710
711 case CC_OP_SUBU_32:
712 case CC_OP_SUBU_64:
713 /* Note that CC=0 is impossible; treat it as dont-care. */
714 switch (mask & 7) {
715 case 2: /* zero -> op1 == op2 */
716 cond = TCG_COND_EQ;
717 break;
718 case 4 | 1: /* !zero -> op1 != op2 */
719 cond = TCG_COND_NE;
720 break;
721 case 4: /* borrow (!carry) -> op1 < op2 */
722 cond = TCG_COND_LTU;
723 break;
724 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
725 cond = TCG_COND_GEU;
726 break;
727 default:
728 goto do_dynamic;
729 }
730 account_inline_branch(s, old_cc_op);
731 break;
732
733 default:
734 do_dynamic:
735 /* Calculate cc value. */
736 gen_op_calc_cc(s);
737 /* FALLTHRU */
738
739 case CC_OP_STATIC:
740 /* Jump based on CC. We'll load up the real cond below;
741 the assignment here merely avoids a compiler warning. */
742 account_noninline_branch(s, old_cc_op);
743 old_cc_op = CC_OP_STATIC;
744 cond = TCG_COND_NEVER;
745 break;
746 }
747
748 /* Load up the arguments of the comparison. */
749 c->is_64 = true;
750 c->g1 = c->g2 = false;
751 switch (old_cc_op) {
752 case CC_OP_LTGT0_32:
753 c->is_64 = false;
754 c->u.s32.a = tcg_temp_new_i32();
755 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
756 c->u.s32.b = tcg_const_i32(0);
757 break;
758 case CC_OP_LTGT_32:
759 case CC_OP_LTUGTU_32:
760 case CC_OP_SUBU_32:
761 c->is_64 = false;
762 c->u.s32.a = tcg_temp_new_i32();
763 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
764 c->u.s32.b = tcg_temp_new_i32();
765 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
766 break;
767
768 case CC_OP_LTGT0_64:
769 case CC_OP_NZ:
770 case CC_OP_FLOGR:
771 c->u.s64.a = cc_dst;
772 c->u.s64.b = tcg_const_i64(0);
773 c->g1 = true;
774 break;
775 case CC_OP_LTGT_64:
776 case CC_OP_LTUGTU_64:
777 case CC_OP_SUBU_64:
778 c->u.s64.a = cc_src;
779 c->u.s64.b = cc_dst;
780 c->g1 = c->g2 = true;
781 break;
782
783 case CC_OP_TM_32:
784 case CC_OP_TM_64:
785 case CC_OP_ICM:
/* Compare (src & dst) against zero. */
786 c->u.s64.a = tcg_temp_new_i64();
787 c->u.s64.b = tcg_const_i64(0);
788 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
789 break;
790
791 case CC_OP_ADDU_32:
792 c->is_64 = false;
793 c->u.s32.a = tcg_temp_new_i32();
794 c->u.s32.b = tcg_temp_new_i32();
795 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
796 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
797 tcg_gen_movi_i32(c->u.s32.b, 0);
798 } else {
799 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
800 }
801 break;
802
803 case CC_OP_ADDU_64:
804 c->u.s64.a = cc_vr;
805 c->g1 = true;
806 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
807 c->u.s64.b = tcg_const_i64(0);
808 } else {
809 c->u.s64.b = cc_src;
810 c->g2 = true;
811 }
812 break;
813
814 case CC_OP_STATIC:
/* The cc value itself is in the cc_op global; compare it directly
   where the mask selects a contiguous/simple set, else fall back to
   the generic (8 >> cc) & mask test. */
815 c->is_64 = false;
816 c->u.s32.a = cc_op;
817 c->g1 = true;
818 switch (mask) {
819 case 0x8 | 0x4 | 0x2: /* cc != 3 */
820 cond = TCG_COND_NE;
821 c->u.s32.b = tcg_const_i32(3);
822 break;
823 case 0x8 | 0x4 | 0x1: /* cc != 2 */
824 cond = TCG_COND_NE;
825 c->u.s32.b = tcg_const_i32(2);
826 break;
827 case 0x8 | 0x2 | 0x1: /* cc != 1 */
828 cond = TCG_COND_NE;
829 c->u.s32.b = tcg_const_i32(1);
830 break;
831 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
832 cond = TCG_COND_EQ;
833 c->g1 = false;
834 c->u.s32.a = tcg_temp_new_i32();
835 c->u.s32.b = tcg_const_i32(0);
836 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
837 break;
838 case 0x8 | 0x4: /* cc < 2 */
839 cond = TCG_COND_LTU;
840 c->u.s32.b = tcg_const_i32(2);
841 break;
842 case 0x8: /* cc == 0 */
843 cond = TCG_COND_EQ;
844 c->u.s32.b = tcg_const_i32(0);
845 break;
846 case 0x4 | 0x2 | 0x1: /* cc != 0 */
847 cond = TCG_COND_NE;
848 c->u.s32.b = tcg_const_i32(0);
849 break;
850 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
851 cond = TCG_COND_NE;
852 c->g1 = false;
853 c->u.s32.a = tcg_temp_new_i32();
854 c->u.s32.b = tcg_const_i32(0);
855 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
856 break;
857 case 0x4: /* cc == 1 */
858 cond = TCG_COND_EQ;
859 c->u.s32.b = tcg_const_i32(1);
860 break;
861 case 0x2 | 0x1: /* cc > 1 */
862 cond = TCG_COND_GTU;
863 c->u.s32.b = tcg_const_i32(1);
864 break;
865 case 0x2: /* cc == 2 */
866 cond = TCG_COND_EQ;
867 c->u.s32.b = tcg_const_i32(2);
868 break;
869 case 0x1: /* cc == 3 */
870 cond = TCG_COND_EQ;
871 c->u.s32.b = tcg_const_i32(3);
872 break;
873 default:
874 /* CC is masked by something else: (8 >> cc) & mask. */
875 cond = TCG_COND_NE;
876 c->g1 = false;
877 c->u.s32.a = tcg_const_i32(8);
878 c->u.s32.b = tcg_const_i32(0);
879 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
880 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
881 break;
882 }
883 break;
884
885 default:
886 abort();
887 }
888 c->cond = cond;
889 }
890
891 static void free_compare(DisasCompare *c)
892 {
893 if (!c->g1) {
894 if (c->is_64) {
895 tcg_temp_free_i64(c->u.s64.a);
896 } else {
897 tcg_temp_free_i32(c->u.s32.a);
898 }
899 }
900 if (!c->g2) {
901 if (c->is_64) {
902 tcg_temp_free_i64(c->u.s64.b);
903 } else {
904 tcg_temp_free_i32(c->u.s32.b);
905 }
906 }
907 }
908
909 /* ====================================================================== */
910 /* Define the insn format enumeration. */
911 #define F0(N) FMT_##N,
912 #define F1(N, X1) F0(N)
913 #define F2(N, X1, X2) F0(N)
914 #define F3(N, X1, X2, X3) F0(N)
915 #define F4(N, X1, X2, X3, X4) F0(N)
916 #define F5(N, X1, X2, X3, X4, X5) F0(N)
917
918 typedef enum {
919 #include "insn-format.def"
920 } DisasFormat;
921
922 #undef F0
923 #undef F1
924 #undef F2
925 #undef F3
926 #undef F4
927 #undef F5
928
929 /* Define a structure to hold the decoded fields. We'll store each inside
930 an array indexed by an enum. In order to conserve memory, we'll arrange
931 for fields that do not exist at the same time to overlap, thus the "C"
932 for compact. For checking purposes there is an "O" for original index
933 as well that will be applied to availability bitmaps. */
934
935 enum DisasFieldIndexO {
936 FLD_O_r1,
937 FLD_O_r2,
938 FLD_O_r3,
939 FLD_O_m1,
940 FLD_O_m3,
941 FLD_O_m4,
942 FLD_O_b1,
943 FLD_O_b2,
944 FLD_O_b4,
945 FLD_O_d1,
946 FLD_O_d2,
947 FLD_O_d4,
948 FLD_O_x2,
949 FLD_O_l1,
950 FLD_O_l2,
951 FLD_O_i1,
952 FLD_O_i2,
953 FLD_O_i3,
954 FLD_O_i4,
955 FLD_O_i5
956 };
957
958 enum DisasFieldIndexC {
959 FLD_C_r1 = 0,
960 FLD_C_m1 = 0,
961 FLD_C_b1 = 0,
962 FLD_C_i1 = 0,
963
964 FLD_C_r2 = 1,
965 FLD_C_b2 = 1,
966 FLD_C_i2 = 1,
967
968 FLD_C_r3 = 2,
969 FLD_C_m3 = 2,
970 FLD_C_i3 = 2,
971
972 FLD_C_m4 = 3,
973 FLD_C_b4 = 3,
974 FLD_C_i4 = 3,
975 FLD_C_l1 = 3,
976
977 FLD_C_i5 = 4,
978 FLD_C_d1 = 4,
979
980 FLD_C_d2 = 5,
981
982 FLD_C_d4 = 6,
983 FLD_C_x2 = 6,
984 FLD_C_l2 = 6,
985
986 NUM_C_FIELD = 7
987 };
988
989 struct DisasFields {
/* Major and minor opcode bytes of the decoded instruction. */
990 unsigned op:8;
991 unsigned op2:8;
/* Bitmaps of which fields were decoded, indexed by the compact (C)
   and original (O) field-index enums respectively. */
992 unsigned presentC:16;
993 unsigned int presentO;
/* Decoded field values, indexed by enum DisasFieldIndexC. */
994 int c[NUM_C_FIELD];
995 };
996
997 /* This is the way fields are to be accessed out of DisasFields. */
998 #define have_field(S, F) have_field1((S), FLD_O_##F)
999 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1000
/* Was original-index field 'c' decoded for this instruction? */
1001 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1002 {
1003 return (f->presentO >> c) & 1;
1004 }
1005
/* Fetch a decoded field value; 'o' is only used to assert presence,
   'c' is the compact storage index.  Use via the get_field() macro. */
1006 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1007 enum DisasFieldIndexC c)
1008 {
1009 assert(have_field1(f, o));
1010 return f->c[c];
1011 }
1012
1013 /* Describe the layout of each field in each format. */
1014 typedef struct DisasField {
1015 unsigned int beg:8;
1016 unsigned int size:8;
1017 unsigned int type:2;
1018 unsigned int indexC:6;
1019 enum DisasFieldIndexO indexO:8;
1020 } DisasField;
1021
1022 typedef struct DisasFormatInfo {
1023 DisasField op[NUM_C_FIELD];
1024 } DisasFormatInfo;
1025
1026 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1027 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1028 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1029 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1030 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1031 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1032 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1033 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1034 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1035 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1036 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1037 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1038 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1039 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1040
1041 #define F0(N) { { } },
1042 #define F1(N, X1) { { X1 } },
1043 #define F2(N, X1, X2) { { X1, X2 } },
1044 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1045 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1046 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1047
1048 static const DisasFormatInfo format_info[] = {
1049 #include "insn-format.def"
1050 };
1051
1052 #undef F0
1053 #undef F1
1054 #undef F2
1055 #undef F3
1056 #undef F4
1057 #undef F5
1058 #undef R
1059 #undef M
1060 #undef BD
1061 #undef BXD
1062 #undef BDL
1063 #undef BXDL
1064 #undef I
1065 #undef L
1066
1067 /* Generally, we'll extract operands into this structures, operate upon
1068 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1069 of routines below for more details. */
1070 typedef struct {
1071 bool g_out, g_out2, g_in1, g_in2;
1072 TCGv_i64 out, out2, in1, in2;
1073 TCGv_i64 addr1;
1074 } DisasOps;
1075
1076 /* Instructions can place constraints on their operands, raising specification
1077 exceptions if they are violated. To make this easy to automate, each "in1",
1078 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1079 of the following, or 0. To make this easy to document, we'll put the
1080 SPEC_<name> defines next to <name>. */
1081
1082 #define SPEC_r1_even 1
1083 #define SPEC_r2_even 2
1084 #define SPEC_r3_even 4
1085 #define SPEC_r1_f128 8
1086 #define SPEC_r2_f128 16
1087
1088 /* Return values from translate_one, indicating the state of the TB. */
1089 typedef enum {
1090 /* Continue the TB. */
1091 NO_EXIT,
1092 /* We have emitted one or more goto_tb. No fixup required. */
1093 EXIT_GOTO_TB,
1094 /* We are not using a goto_tb (for whatever reason), but have updated
1095 the PC (for whatever reason), so there's no need to do it again on
1096 exiting the TB. */
1097 EXIT_PC_UPDATED,
1098 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1099 updated the PC for the next instruction to be executed. */
1100 EXIT_PC_STALE,
1101 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1102 No following code will be executed. */
1103 EXIT_NORETURN,
1104 } ExitStatus;
1105
1106 typedef enum DisasFacility {
1107 FAC_Z, /* zarch (default) */
1108 FAC_CASS, /* compare and swap and store */
1109 FAC_CASS2, /* compare and swap and store 2*/
1110 FAC_DFP, /* decimal floating point */
1111 FAC_DFPR, /* decimal floating point rounding */
1112 FAC_DO, /* distinct operands */
1113 FAC_EE, /* execute extensions */
1114 FAC_EI, /* extended immediate */
1115 FAC_FPE, /* floating point extension */
1116 FAC_FPSSH, /* floating point support sign handling */
1117 FAC_FPRGR, /* FPR-GR transfer */
1118 FAC_GIE, /* general instructions extension */
1119 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1120 FAC_HW, /* high-word */
1121 FAC_IEEEE_SIM, /* IEEE exception sumilation */
1122 FAC_LOC, /* load/store on condition */
1123 FAC_LD, /* long displacement */
1124 FAC_PC, /* population count */
1125 FAC_SCF, /* store clock fast */
1126 FAC_SFLE, /* store facility list extended */
1127 FAC_ILA, /* interlocked access facility 1 */
1128 } DisasFacility;
1129
/* Decode-table entry for one instruction: opcode, format, required
   facility, operand-constraint bits (SPEC_*), and the pipeline of
   helpers that load operands, perform the op, write results back and
   set the condition code. */
1130 struct DisasInsn {
1131 unsigned opc:16;
1132 DisasFormat fmt:8;
1133 DisasFacility fac:8;
1134 unsigned spec:8;
1135
1136 const char *name;
1137
/* Optional stages; any may be NULL.  in1/in2 load operands, prep
   prepares outputs, wout stores results, cout sets the cc, op is the
   main body returning the TB exit status. */
1138 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1139 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1140 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1141 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1142 void (*help_cout)(DisasContext *, DisasOps *);
1143 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1144
/* Per-instruction constant available to the helpers. */
1145 uint64_t data;
1146 };
1147
1148 /* ====================================================================== */
1149 /* Miscellaneous helpers, used by several operations. */
1150
/* Load a shift amount into o->in2: the immediate displacement when no
   base register is given, otherwise the computed address; either way
   reduced modulo MASK (31 or 63). */
1151 static void help_l2_shift(DisasContext *s, DisasFields *f,
1152 DisasOps *o, int mask)
1153 {
1154 int b2 = get_field(f, b2);
1155 int d2 = get_field(f, d2);
1156
1157 if (b2 == 0) {
1158 o->in2 = tcg_const_i64(d2 & mask);
1159 } else {
1160 o->in2 = get_address(s, 0, b2, d2);
1161 tcg_gen_andi_i64(o->in2, o->in2, mask);
1162 }
1163 }
1164
/* Emit an unconditional direct branch to DEST.  Falls through (NO_EXIT)
   for a branch-to-next, chains with goto_tb when allowed, and otherwise
   just updates psw_addr and lets the main loop dispatch. */
1165 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1166 {
1167 if (dest == s->next_pc) {
1168 return NO_EXIT;
1169 }
1170 if (use_goto_tb(s, dest)) {
1171 update_cc_op(s);
1172 tcg_gen_goto_tb(0);
1173 tcg_gen_movi_i64(psw_addr, dest);
1174 tcg_gen_exit_tb((uintptr_t)s->tb);
1175 return EXIT_GOTO_TB;
1176 } else {
1177 tcg_gen_movi_i64(psw_addr, dest);
1178 return EXIT_PC_UPDATED;
1179 }
1180 }
1181
/* Common tail for all conditional-branch style insns.  C describes the
   (already materialized) condition; IS_IMM selects a PC-relative target
   (s->pc + 2*IMM) versus an indirect target in CDEST.  Always consumes
   (frees) the compare C.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        /* Never-taken branch: nothing to emit.  */
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so a single 64-bit
               movcond can select between the two successor PCs.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1306
1307 /* ====================================================================== */
1308 /* The operations. These perform the bulk of the work for any insn,
1309 usually after the operands have been loaded and output initialized. */
1310
1311 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1312 {
1313 TCGv_i64 z, n;
1314 z = tcg_const_i64(0);
1315 n = tcg_temp_new_i64();
1316 tcg_gen_neg_i64(n, o->in2);
1317 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1318 tcg_temp_free_i64(n);
1319 tcg_temp_free_i64(z);
1320 return NO_EXIT;
1321 }
1322
/* Absolute value of a short BFP operand: clear the sign bit (bit 31).  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
1328
/* Absolute value of a long BFP operand: clear the sign bit (bit 63).  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
1334
/* Absolute value of an extended (128-bit) BFP operand: clear the sign
   bit in the high half, pass the low half through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
1341
/* Integer addition: out = in1 + in2.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1347
/* Add with carry: out = in1 + in2 + carry-in, where carry-in is
   recovered from the current condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: setcond in 32 bits, then zero-extend.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
1374
/* Short BFP add via the aeb helper (may raise FP exceptions in env).  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1380
/* Long BFP add via the adb helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1386
/* Extended (128-bit) BFP add: high halves in out/in1, low halves in
   out2/in2; the helper returns the low 64 bits out of band.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1393
/* Bitwise AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1399
/* AND an immediate into one 8/16/32-bit field of in1; insn->data packs
   the field position (low byte = shift) and width (high bits = size).
   Bits outside the field are preserved by OR-ing ~mask into the moved
   immediate before the AND.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is scribbled on below, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1416
1417 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1418 {
1419 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1420 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1421 tcg_gen_mov_i64(psw_addr, o->in2);
1422 return EXIT_PC_UPDATED;
1423 } else {
1424 return NO_EXIT;
1425 }
1426 }
1427
/* Branch-and-save with PC-relative immediate target: save the link
   information, then branch to pc + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1433
1434 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1435 {
1436 int m1 = get_field(s->fields, m1);
1437 bool is_imm = have_field(s->fields, i2);
1438 int imm = is_imm ? get_field(s->fields, i2) : 0;
1439 DisasCompare c;
1440
1441 disas_jcc(s, &c, m1);
1442 return help_branch(s, &c, is_imm, imm, o->in2);
1443 }
1444
/* 32-bit branch-on-count: decrement the low half of R1 and branch if
   the 32-bit result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits, but only the low 32 bits are stored back
       and compared.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1468
/* 64-bit branch-on-count: decrement R1 and branch if non-zero.  The
   comparison uses the global register directly (g1 = true).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1487
/* 32-bit branch-on-index: add R3 to R1, compare the 32-bit sum against
   the odd register of the R3 pair (r3 | 1).  insn->data selects the
   branch sense (LE vs GT) — presumably BXLE vs BXH; confirm against
   the insn table.  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    /* Store the updated index before the branch helper emits the
       comparison.  */
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1513
/* 64-bit branch-on-index.  When R1 aliases the comparand register
   (r3 | 1), its pre-update value must be copied to a temp first,
   since the addition below clobbers R1.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* Aliased: snapshot the comparand into a local temp.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1539
/* Compare-and-branch: compare in1 against in2 with the relation given
   by the M3 mask (made unsigned when insn->data is set), then branch
   to either a PC-relative i4 target or a D4(B4) computed address.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    /* Compare the inputs directly; both are globals here.  */
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        /* Indirect form: compute the branch address into o->out.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1565
/* Short BFP compare: helper computes the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1572
/* Long BFP compare: helper computes the CC directly.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1579
/* Extended (128-bit) BFP compare: high/low halves are passed as two
   64-bit pairs; helper computes the CC directly.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1586
/* Convert short BFP to 32-bit fixed; M3 carries the rounding mode.
   CC is derived from the float input afterwards.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1595
/* Convert long BFP to 32-bit fixed; M3 carries the rounding mode.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1604
/* Convert extended BFP (in1:in2 = high:low) to 32-bit fixed.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1613
/* Convert short BFP to 64-bit fixed; M3 carries the rounding mode.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1622
/* Convert long BFP to 64-bit fixed; M3 carries the rounding mode.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1631
/* Convert extended BFP (in1:in2 = high:low) to 64-bit fixed.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1640
/* Convert short BFP to 32-bit logical (unsigned); M3 = rounding mode.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1649
/* Convert long BFP to 32-bit logical (unsigned); M3 = rounding mode.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1658
/* Convert extended BFP to 32-bit logical (unsigned).  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1667
/* Convert short BFP to 64-bit logical (unsigned); M3 = rounding mode.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1676
/* Convert long BFP to 64-bit logical (unsigned); M3 = rounding mode.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1685
/* Convert extended BFP to 64-bit logical (unsigned).  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1694
/* Convert 64-bit fixed to short BFP; M3 = rounding mode.  No CC.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1702
/* Convert 64-bit fixed to long BFP; M3 = rounding mode.  No CC.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1710
/* Convert 64-bit fixed to extended BFP; low 64 bits come back via
   return_low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1719
/* Convert 64-bit logical (unsigned) to short BFP; M3 = rounding mode.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1727
/* Convert 64-bit logical (unsigned) to long BFP; M3 = rounding mode.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1735
/* Convert 64-bit logical (unsigned) to extended BFP; low 64 bits come
   back via return_low128.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1744
/* CHECKSUM (CKSM): the helper computes the checksum and the number of
   bytes processed; R2/R2+1 (address/length pair) are then advanced by
   that count.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    /* The checksum itself is the helper's second (low) return value.  */
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1761
/* COMPARE LOGICAL (CLC): for power-of-two lengths up to 8 bytes, load
   both operands inline and let the CC machinery do the unsigned
   comparison; otherwise call the byte-loop helper.  L1 is the length
   minus one, per the instruction encoding.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: helper performs the comparison and sets CC.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1795
/* COMPARE LOGICAL LONG EXTENDED: fully handled by the helper, which
   sets the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
1807
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the bytes of
   in1 (truncated to 32 bits) selected by M3 against storage, setting CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1820
/* COMPARE LOGICAL STRING: helper walks the strings (terminator in r0),
   returning the updated addresses (second one via return_low128).  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1829
/* COPY SIGN: out = magnitude of in2 with the sign bit of in1.  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
1839
/* COMPARE AND SWAP (CS/CSG).  insn->data distinguishes the 64-bit form.
   Non-atomic implementation: load, compare, conditionally store.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1889
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit compare-and-swap implemented
   as two 64-bit loads/stores.  Non-atomic (see FIXME).  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  CC ends up 0 iff
       both halves matched the expected value.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* As in op_cs: store unconditionally, writing back the old contents
       on mismatch.  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions.  */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1941
1942 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): fully handled by helper,
   which sets the CC.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
1952 #endif
1953
/* CONVERT TO DECIMAL: the helper converts the low 32 bits of in1 to a
   packed-decimal doubleword, which is stored at the in2 address.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1965
/* Compare-and-trap style op: if the M3 relation between in1 and in2
   holds (insn->data selects the unsigned variant), set DXC to 0xff in
   the FPC and raise a data program exception; otherwise fall through.
   The emitted branch skips the trap, hence the inverted condition.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap.  */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1992
1993 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): dispatch on the function code in the low 12
   bits of the D2 field; r1/r2 convention per the helper.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
2007 #endif
2008
/* 32-bit signed divide: helper yields remainder/quotient as a pair,
   the second value returned via return_low128.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2015
/* 32-bit unsigned divide; result pair as in op_divs32.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2022
/* 64-bit signed divide; result pair as in op_divs32.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2029
/* 128/64-bit unsigned divide: the dividend is the out:out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2036
/* Short BFP divide via helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2042
/* Long BFP divide via helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2048
/* Extended (128-bit) BFP divide; low 64 bits via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2055
/* EXTRACT ACCESS REGISTER: load access register r2 into the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
2062
/* EXTRACT CPU ATTRIBUTE: report "no cache information" (all ones).  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
2069
/* EXTRACT FPC: load the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2075
/* EXTRACT PSW: R1 gets the high word of the PSW mask; R2 (if non-zero)
   gets the low word.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
2093
/* EXECUTE: synchronize PC and CC, then let the helper run the target
   instruction (with the low byte of in1 OR-ed into it per the ISA).  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
2118
/* FIND LEFTMOST ONE: R1 = count of leading zeros (64 if input is 0),
   R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2138
2139 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2140 {
2141 int m3 = get_field(s->fields, m3);
2142 int pos, len, base = s->insn->data;
2143 TCGv_i64 tmp = tcg_temp_new_i64();
2144 uint64_t ccm;
2145
2146 switch (m3) {
2147 case 0xf:
2148 /* Effectively a 32-bit load. */
2149 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2150 len = 32;
2151 goto one_insert;
2152
2153 case 0xc:
2154 case 0x6:
2155 case 0x3:
2156 /* Effectively a 16-bit load. */
2157 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2158 len = 16;
2159 goto one_insert;
2160
2161 case 0x8:
2162 case 0x4:
2163 case 0x2:
2164 case 0x1:
2165 /* Effectively an 8-bit load. */
2166 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2167 len = 8;
2168 goto one_insert;
2169
2170 one_insert:
2171 pos = base + ctz32(m3) * 8;
2172 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2173 ccm = ((1ull << len) - 1) << pos;
2174 break;
2175
2176 default:
2177 /* This is going to be a sequence of loads and inserts. */
2178 pos = base + 32 - 8;
2179 ccm = 0;
2180 while (m3) {
2181 if (m3 & 0x8) {
2182 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2183 tcg_gen_addi_i64(o->in2, o->in2, 1);
2184 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2185 ccm |= 0xff << pos;
2186 }
2187 m3 = (m3 << 1) & 0xf;
2188 pos -= 8;
2189 }
2190 break;
2191 }
2192
2193 tcg_gen_movi_i64(tmp, ccm);
2194 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2195 tcg_temp_free_i64(tmp);
2196 return NO_EXIT;
2197 }
2198
/* Insert in2 into a field of in1; insn->data packs the field position
   (low byte = shift) and width (high bits = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2206
/* INSERT PROGRAM MASK: build bits 32-39 of the output from the program
   mask (taken from the PSW mask) and the condition code.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    /* Clear the byte we are about to fill in.  */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Program mask: PSW mask bits 40-43 moved into output bits 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Condition code into output bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2225
2226 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged): fully handled by helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2233
/* INSERT STORAGE KEY EXTENDED (privileged): fully handled by helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2240 #endif
2241
/* Lengthen short BFP to long BFP via helper.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2247
/* Round long BFP to short BFP via helper.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2253
/* Round extended BFP (in1:in2) to long BFP via helper.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2259
/* Round extended BFP (in1:in2) to short BFP via helper.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2265
/* Lengthen long BFP to extended BFP; low half via return_low128.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2272
/* Lengthen short BFP to extended BFP; low half via return_low128.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2279
/* Load with the top 33 bits cleared (31-bit logical value).  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2285
/* Load a sign-extended byte from the in2 address.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2291
2292 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2293 {
2294 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2295 return NO_EXIT;
2296 }
2297
2298 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2299 {
2300 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2301 return NO_EXIT;
2302 }
2303
2304 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2305 {
2306 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2307 return NO_EXIT;
2308 }
2309
2310 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2311 {
2312 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2313 return NO_EXIT;
2314 }
2315
2316 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2317 {
2318 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2319 return NO_EXIT;
2320 }
2321
2322 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2323 {
2324 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2325 return NO_EXIT;
2326 }
2327
2328 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2329 {
2330 DisasCompare c;
2331
2332 disas_jcc(s, &c, get_field(s->fields, m3));
2333
2334 if (c.is_64) {
2335 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2336 o->in2, o->in1);
2337 free_compare(&c);
2338 } else {
2339 TCGv_i32 t32 = tcg_temp_new_i32();
2340 TCGv_i64 t, z;
2341
2342 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2343 free_compare(&c);
2344
2345 t = tcg_temp_new_i64();
2346 tcg_gen_extu_i32_i64(t, t32);
2347 tcg_temp_free_i32(t32);
2348
2349 z = tcg_const_i64(0);
2350 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2351 tcg_temp_free_i64(t);
2352 tcg_temp_free_i64(z);
2353 }
2354
2355 return NO_EXIT;
2356 }
2357
2358 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; the helper loads control registers
   r1..r3 from memory at in2 and may fault. */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit): same as above but with 64-bit values. */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS: privileged; translate the virtual address in in2
   via the lra helper, which also sets the condition code. */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2390
/* LOAD PSW: privileged; load a short (2 x 32-bit) PSW from memory at
   in2 and install it.  Never returns to the current TB. */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED: privileged; load a full 2 x 64-bit PSW from
   memory at in2 and install it.  Never returns to the current TB. */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2426 #endif
2427
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory at
   in2; the helper may fault. */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2438
2439 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2440 {
2441 int r1 = get_field(s->fields, r1);
2442 int r3 = get_field(s->fields, r3);
2443 TCGv_i64 t = tcg_temp_new_i64();
2444 TCGv_i64 t4 = tcg_const_i64(4);
2445
2446 while (1) {
2447 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2448 store_reg32_i64(r1, t);
2449 if (r1 == r3) {
2450 break;
2451 }
2452 tcg_gen_add_i64(o->in2, o->in2, t4);
2453 r1 = (r1 + 1) & 15;
2454 }
2455
2456 tcg_temp_free_i64(t);
2457 tcg_temp_free_i64(t4);
2458 return NO_EXIT;
2459 }
2460
2461 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2462 {
2463 int r1 = get_field(s->fields, r1);
2464 int r3 = get_field(s->fields, r3);
2465 TCGv_i64 t = tcg_temp_new_i64();
2466 TCGv_i64 t4 = tcg_const_i64(4);
2467
2468 while (1) {
2469 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2470 store_reg32h_i64(r1, t);
2471 if (r1 == r3) {
2472 break;
2473 }
2474 tcg_gen_add_i64(o->in2, o->in2, t4);
2475 r1 = (r1 + 1) & 15;
2476 }
2477
2478 tcg_temp_free_i64(t);
2479 tcg_temp_free_i64(t4);
2480 return NO_EXIT;
2481 }
2482
2483 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2484 {
2485 int r1 = get_field(s->fields, r1);
2486 int r3 = get_field(s->fields, r3);
2487 TCGv_i64 t8 = tcg_const_i64(8);
2488
2489 while (1) {
2490 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2491 if (r1 == r3) {
2492 break;
2493 }
2494 tcg_gen_add_i64(o->in2, o->in2, t8);
2495 r1 = (r1 + 1) & 15;
2496 }
2497
2498 tcg_temp_free_i64(t8);
2499 return NO_EXIT;
2500 }
2501
2502 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit): privileged; the helper performs
   the real-address load and may fault. */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD USING REAL ADDRESS (64-bit). */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2518 #endif
2519
/* Plain register move: steal in2 as the output rather than copying it.
   The in2 slot is cleared so the generic epilogue does not free it. */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* 128-bit move: steal the in1/in2 pair as out/out2, clearing the input
   slots as in op_mov2. */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2540
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via helper. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: register pairs r1/r2 describe the operands; the helper
   sets the condition code. */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: like op_mvcl but with the r1/r3 form and an
   explicit second-operand address in in2. */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2573
2574 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-space move; the key/length register
   number is carried in the l1 field for this format.  CC via helper. */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: mirror image of op_mvcp. */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2594 #endif
2595
/* MOVE PAGE: helper consumes r0 (flags) plus the two page addresses
   and sets the condition code. */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: r0 holds the terminator byte; the helper returns the
   updated first-operand address (in1) and the second via low128. */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2612
/* MULTIPLY (64-bit, low half only). */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY producing a full 128-bit unsigned product in out:out2. */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiply, short operands. */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiply, short operands with long result. */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiply, long operands. */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiply, extended operands (128-bit pairs in/out). */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* BFP multiply, long operand with extended result. */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2656
/* BFP multiply-and-add, short: out = in1 * in2 + f[r3]. */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* BFP multiply-and-add, long: the third operand is read directly from
   the fregs array, so no temp is needed. */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* BFP multiply-and-subtract, short. */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* BFP multiply-and-subtract, long. */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2686
2687 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2688 {
2689 TCGv_i64 z, n;
2690 z = tcg_const_i64(0);
2691 n = tcg_temp_new_i64();
2692 tcg_gen_neg_i64(n, o->in2);
2693 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
2694 tcg_temp_free_i64(n);
2695 tcg_temp_free_i64(z);
2696 return NO_EXIT;
2697 }
2698
/* FP negative absolute value: force the sign bit on.  Short format
   keeps the value in the high-order bits of the 64-bit freg image,
   hence bit 0x80000000. */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* Long format: set bit 63. */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* Extended format: the sign lives in the high doubleword (in1);
   the low doubleword passes through unchanged. */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2717
/* AND (character): bitwise-AND l1+1 bytes at addr1 with those at in2;
   the helper computes the condition code. */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2727
/* Two's-complement negation. */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* FP negation: flip the sign bit.  Short format lives in the high bits
   of the 64-bit freg image. */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* Long format: flip bit 63. */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* Extended format: sign is in the high doubleword (in1); low passes
   through unchanged. */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2752
/* OR (character): bitwise-OR l1+1 bytes at addr1 with those at in2;
   the helper computes the condition code. */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* Plain 64-bit bitwise OR. */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2768
/* OR immediate into a sub-field of the register.  insn->data packs the
   field position (low byte, bit shift) and width (next byte, in bits). */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must be a private temp.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2784
/* POPULATION COUNT: delegated to a helper (no env needed). */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
2790
2791 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; flush via helper, no operands. */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2798 #endif
2799
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate in2
   left by i5, then insert bits i3..i4 of the result into out, keeping
   or zeroing the remaining bits depending on the zero flag in i4. */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;    /* "zero remaining bits" control flag */
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
2882
/* ROTATE THEN <AND|OR|XOR> SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate
   in2 left by i5, apply the boolean op over bit range i3..i4 of out,
   and set the CC from the affected bits.  Bit 0x80 of i3 selects the
   test-only form, which discards the result. */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside MASK are forced to the boolean op's
       identity element so they pass through out unchanged. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2935
/* Byte-swap the low 16 bits. */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap the low 32 bits. */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap all 64 bits. */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2953
2954 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2955 {
2956 TCGv_i32 t1 = tcg_temp_new_i32();
2957 TCGv_i32 t2 = tcg_temp_new_i32();
2958 TCGv_i32 to = tcg_temp_new_i32();
2959 tcg_gen_trunc_i64_i32(t1, o->in1);
2960 tcg_gen_trunc_i64_i32(t2, o->in2);
2961 tcg_gen_rotl_i32(to, t1, t2);
2962 tcg_gen_extu_i32_i64(o->out, to);
2963 tcg_temp_free_i32(t1);
2964 tcg_temp_free_i32(t2);
2965 tcg_temp_free_i32(to);
2966 return NO_EXIT;
2967 }
2968
/* ROTATE LEFT (64-bit). */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2974
2975 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper sets the CC. */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL FAST: privileged. */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
2991 #endif
2992
/* SET ADDRESSING MODE (SAM24/SAM31/SAM64): insn->data selects the
   target mode; update the mode bits in psw_mask and truncate the
   continuation address accordingly. */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        /* 24-bit mode */
        mask = 0xffffff;
        break;
    case 1:
        /* 31-bit mode */
        mask = 0x7fffffff;
        break;
    default:
        /* 64-bit mode */
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}
3027
/* SET ACCESS REGISTER: store the low 32 bits of in2 into aregs[r1]. */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
3034
/* BFP subtract, short. */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP subtract, long. */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP subtract, extended (128-bit pairs in/out, low via low128). */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* BFP square root, short. */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* BFP square root, long. */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* BFP square root, extended. */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3072
3073 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL: privileged; the helper performs the SCLP call and
   sets the condition code. */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged; note the CC is produced by the helper
   but not folded back with set_cc_static here. */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3092 #endif
3093
/* STORE ON CONDITION: store r1 (32 or 64 bits per insn->data) to the
   b2/d2 address only when the m3 condition holds. */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    /* insn->data distinguishes the 64-bit from the 32-bit form. */
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
3127
/* SHIFT LEFT ARITHMETIC: insn->data is the sign-bit position (31 or
   63), which also selects the CC computation. */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* SHIFT LEFT LOGICAL. */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT ARITHMETIC. */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT LOGICAL. */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3159
/* SET FPC: install a new floating-point control register via helper. */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: as above but may raise a simulated IEEE event. */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3171
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT): deposit the b2/d2-derived
   value into the appropriate rounding-mode field of the FPC, then
   reinstall the FPC so fpu_status picks up the new mode. */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    /* Each variant targets a different FPC bit-field. */
    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        /* No base register: the displacement is the immediate value. */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3211
3212 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; bits of in2 become the PSW
   access key field. */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged; delegated to helper. */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace the top 8 bits of psw_mask. */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CPU ADDRESS: privileged. */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3244
/* STORE CLOCK: read the TOD clock via helper; CC forced to 0. */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: store a 128-bit clock value built from the
   64-bit helper result. */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3274
/* SET CLOCK COMPARATOR: privileged. */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged. */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit): privileged; store control regs r1..r3. */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit): privileged. */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3312
/* STORE CPU ID: privileged; combine cpu_num (low word) and machine
   type (high word) into the 64-bit result. */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}

/* SET CPU TIMER: privileged. */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
3332
/* STORE FACILITY LIST: privileged; store a hard-coded facility word
   to the fixed low-core location 200. */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}

/* STORE CPU TIMER: privileged. */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
3353
/* STORE SYSTEM INFORMATION: privileged; r0/r1 carry the function and
   selector codes; the helper sets the CC. */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX: privileged. */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* I/O subchannel instructions: privileged, unimplemented; report
   "not operational" via CC 3. */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational.  */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}

/* STORE PREFIX: privileged; read psa and mask to the prefix bits. */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
3385
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac / STOSM): privileged;
   store the current system mask byte, then AND or OR the immediate
   into the top byte of psw_mask. */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3409
/* STORE USING REAL ADDRESS (32-bit): privileged. */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (64-bit): privileged. */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3425 #endif
3426
/* Memory stores of various widths: in1 is the value, in2 the address. */

/* Store the low 8 bits. */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low 16 bits. */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low 32 bits. */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store all 64 bits. */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3450
/* STORE ACCESS MULTIPLE: store access registers r1..r3 to in2. */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3461
3462 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3463 {
3464 int m3 = get_field(s->fields, m3);
3465 int pos, base = s->insn->data;
3466 TCGv_i64 tmp = tcg_temp_new_i64();
3467
3468 pos = base + ctz32(m3) * 8;
3469 switch (m3) {
3470 case 0xf:
3471 /* Effectively a 32-bit store. */
3472 tcg_gen_shri_i64(tmp, o->in1, pos);
3473 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3474 break;
3475
3476 case 0xc:
3477 case 0x6:
3478 case 0x3:
3479 /* Effectively a 16-bit store. */
3480 tcg_gen_shri_i64(tmp, o->in1, pos);
3481 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3482 break;
3483
3484 case 0x8:
3485 case 0x4:
3486 case 0x2:
3487 case 0x1:
3488 /* Effectively an 8-bit store. */
3489 tcg_gen_shri_i64(tmp, o->in1, pos);
3490 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3491 break;
3492
3493 default:
3494 /* This is going to be a sequence of shifts and stores. */
3495 pos = base + 32 - 8;
3496 while (m3) {
3497 if (m3 & 0x8) {
3498 tcg_gen_shri_i64(tmp, o->in1, pos);
3499 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3500 tcg_gen_addi_i64(o->in2, o->in2, 1);
3501 }
3502 m3 = (m3 << 1) & 0xf;
3503 pos -= 8;
3504 }
3505 break;
3506 }
3507 tcg_temp_free_i64(tmp);
3508 return NO_EXIT;
3509 }
3510
/* STORE MULTIPLE: store registers r1..r3 (wrapping from 15 to 0) to
   consecutive memory locations.  s->insn->data is the element size,
   4 (STM/STMY) or 8 (STMG).  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        /* Advance the (non-global) address and wrap the register number.  */
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3534
3535 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3536 {
3537 int r1 = get_field(s->fields, r1);
3538 int r3 = get_field(s->fields, r3);
3539 TCGv_i64 t = tcg_temp_new_i64();
3540 TCGv_i64 t4 = tcg_const_i64(4);
3541 TCGv_i64 t32 = tcg_const_i64(32);
3542
3543 while (1) {
3544 tcg_gen_shl_i64(t, regs[r1], t32);
3545 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3546 if (r1 == r3) {
3547 break;
3548 }
3549 tcg_gen_add_i64(o->in2, o->in2, t4);
3550 r1 = (r1 + 1) & 15;
3551 }
3552
3553 tcg_temp_free_i64(t);
3554 tcg_temp_free_i64(t4);
3555 tcg_temp_free_i64(t32);
3556 return NO_EXIT;
3557 }
3558
/* SEARCH STRING: helper scans for the byte in r0; it returns the
   updated r1 value directly and the updated r2 value via the low
   half of the 128-bit return, and sets the CC itself.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3567
/* Plain subtraction; the CC, if any, is produced by the cout hook.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3573
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where the borrow is
   derived from the current condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    /* Materialize the borrow as 0 or 1 from the comparison.  */
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
3599
/* SUPERVISOR CALL: record the SVC number and instruction length for
   the exception handler, then raise EXCP_SVC.  Never falls through.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Make the PC and cc_op visible before raising the exception.  */
    update_psw_addr(s);
    update_cc_op(s);

    /* The low byte of the i1 field is the SVC interruption code.  */
    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    /* Instruction length, needed to compute the return address.  */
    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3618
/* TEST DATA CLASS, 32/64/128-bit BFP: the helper computes the CC from
   the operand's data class against the mask in IN2.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    /* The 128-bit operand arrives in out/out2 (see in1_x1_o).  */
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3639
#ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: helper probes the page at ADDR1 and yields the CC.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
3649
/* TRANSLATE: translate l1+1 bytes at ADDR1 through the table at IN2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK: convert packed-decimal at IN2 to zoned format at ADDR1.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3668
/* EXCLUSIVE OR (character).  XC with identical first and second
   operands zeroes the destination -- a common idiom -- so expand that
   case inline; otherwise defer to the helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.
       Note that l is the length MINUS ONE; only expand inline up to
       32 bytes.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        /* Emit the widest stores first, narrowing as the remaining
           length shrinks; the address is only advanced while bytes
           remain.  */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* XC of a field with itself always yields CC 0 (result zero).  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
3722
/* Plain bitwise XOR; CC handled by the cout hook.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* XOR immediate into a sub-field of the register.  s->insn->data packs
   the field width (bits 8+) and its shift (low byte).  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* Shift the immediate into position; in2 must be a temporary since
       we modify it in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3744
/* Produce a zero output (e.g. for zeroing insns).  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a 128-bit zero: both output halves share one zero temp, so
   mark out2 as not-to-be-freed.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3758
3759 /* ====================================================================== */
3760 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3761 the original inputs), update the various cc data structures in order to
3762 be able to compute the new condition code. */
3763
/* ABS / negative-ABS results.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed / unsigned additions, with and without carry; three-operand
   CC state so overflow/carry can be recomputed lazily.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Signed / unsigned comparisons.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* Floating-point result classification (zero / < 0 / > 0 / NaN).  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* Complement (LOAD COMPLEMENT style) results.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Non-zero tests; the 32-bit form must mask off the high word first.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Signed compare of the result against zero.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Signed / unsigned subtractions, with and without borrow.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* TEST UNDER MASK.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3919
3920 /* ====================================================================== */
3921 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3922 with the TCG register to which we will write. Used in combination with
3923 the "wout" generators, in some cases we need a new temporary, and in
3924 some cases we can write to a TCG global. */
3925
/* Fresh temporary for the result.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Fresh temporary pair for a 128-bit result.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write straight into the r1 global.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write straight into the even/odd register pair r1/r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write straight into the f1 FP-register global.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write straight into the 128-bit FP register pair r1/r1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3970
3971 /* ====================================================================== */
3972 /* The "Write OUTput" generators. These generally perform some non-trivial
3973 copy of data to TCG globals, or to main memory. The trivial cases are
3974 generally handled by having a "prep" generator install the TCG global
3975 as the destination of the operation. */
3976
/* Full 64-bit register write.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Write the low 32 bits of the result into r1, preserving the high.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Write the two 32-bit outputs into the even/odd pair r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd pair: high word to r1,
   low word to r1+1.  Note this clobbers o->out in the process.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Short (32-bit) FP register write.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Long (64-bit) FP register write.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
4031
4032 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4033 {
4034 int f1 = get_field(s->fields, r1);
4035 store_freg(f1, o->out);
4036 store_freg(f1 + 2, o->out2);
4037 }
4038 #define SPEC_wout_x1 SPEC_r1_f128
4039
/* Conditional writes used by insns that are no-ops when r1 == r2.  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Memory writes of the result to the first-operand address.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Memory write of the result to the second-operand address.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Load-and-op style writeback: store the new value, return the old
   value (in in2) to r1.  */
static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_32_r1_atomic 0

static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_64_r1_atomic 0
4101
4102 /* ====================================================================== */
4103 /* The "INput 1" generators. These load the first operand to an insn. */
4104
/* r1 as a fresh copy (safe to modify).  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* r1 as the global itself (read-only use).  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Low word of r1, sign- or zero-extended.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High word of r1 shifted down into the low word.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the even/odd pair r1/r1+1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value assembled from the low words of the pair: r1 is the
   high half, r1+1 the low half.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* Short FP operand from f1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* Long FP operand as the global itself.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* 128-bit FP operand from the pair r1/r1+2.  NOTE: deliberately fills
   out/out2 rather than in1/in2 -- consumers of a 128-bit first operand
   read it from there (e.g. op_tcxb above).  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Effective address of the first (or second) operand into addr1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Memory first operands: compute addr1, then load with the stated
   width and extension.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4297
4298 /* ====================================================================== */
4299 /* The "INput 2" generators. These load the second operand to an insn. */
4300
4301 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4302 {
4303 o->in2 = regs[get_field(f, r1)];
4304 o->g_in2 = true;
4305 }
4306 #define SPEC_in2_r1_o 0
4307
4308 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4309 {
4310 o->in2 = tcg_temp_new_i64();
4311 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4312 }
4313 #define SPEC_in2_r1_16u 0
4314
4315 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4316 {
4317 o->in2 = tcg_temp_new_i64();
4318 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4319 }
4320 #define SPEC_in2_r1_32u 0
4321
4322 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4323 {
4324 int r1 = get_field(f, r1);
4325 o->in2 = tcg_temp_new_i64();
4326 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4327 }
4328 #define SPEC_in2_r1_D32 SPEC_r1_even
4329
4330 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4331 {
4332 o->in2 = load_reg(get_field(f, r2));
4333 }
4334 #define SPEC_in2_r2 0
4335
4336 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4337 {
4338 o->in2 = regs[get_field(f, r2)];
4339 o->g_in2 = true;
4340 }
4341 #define SPEC_in2_r2_o 0
4342
4343 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4344 {
4345 int r2 = get_field(f, r2);
4346 if (r2 != 0) {
4347 o->in2 = load_reg(r2);
4348 }
4349 }
4350 #define SPEC_in2_r2_nz 0
4351
4352 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4353 {
4354 o->in2 = tcg_temp_new_i64();
4355 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4356 }
4357 #define SPEC_in2_r2_8s 0
4358
4359 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4360 {
4361 o->in2 = tcg_temp_new_i64();
4362 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4363 }
4364 #define SPEC_in2_r2_8u 0
4365
4366 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4367 {
4368 o->in2 = tcg_temp_new_i64();
4369 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4370 }
4371 #define SPEC_in2_r2_16s 0
4372
4373 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4374 {
4375 o->in2 = tcg_temp_new_i64();
4376 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4377 }
4378 #define SPEC_in2_r2_16u 0
4379
4380 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4381 {
4382 o->in2 = load_reg(get_field(f, r3));
4383 }
4384 #define SPEC_in2_r3 0
4385
4386 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4387 {
4388 o->in2 = tcg_temp_new_i64();
4389 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4390 }
4391 #define SPEC_in2_r2_32s 0
4392
4393 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4394 {
4395 o->in2 = tcg_temp_new_i64();
4396 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4397 }
4398 #define SPEC_in2_r2_32u 0
4399
4400 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4401 {
4402 o->in2 = load_freg32_i64(get_field(f, r2));
4403 }
4404 #define SPEC_in2_e2 0
4405
4406 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4407 {
4408 o->in2 = fregs[get_field(f, r2)];
4409 o->g_in2 = true;
4410 }
4411 #define SPEC_in2_f2_o 0
4412
4413 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4414 {
4415 int r2 = get_field(f, r2);
4416 o->in1 = fregs[r2];
4417 o->in2 = fregs[r2 + 2];
4418 o->g_in1 = o->g_in2 = true;
4419 }
4420 #define SPEC_in2_x2_o SPEC_r2_f128
4421
4422 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4423 {
4424 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4425 }
4426 #define SPEC_in2_ra2 0
4427
4428 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4429 {
4430 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4431 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4432 }
4433 #define SPEC_in2_a2 0
4434
4435 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4436 {
4437 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4438 }
4439 #define SPEC_in2_ri2 0
4440
4441 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4442 {
4443 help_l2_shift(s, f, o, 31);
4444 }
4445 #define SPEC_in2_sh32 0
4446
4447 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4448 {
4449 help_l2_shift(s, f, o, 63);
4450 }
4451 #define SPEC_in2_sh64 0
4452
4453 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4454 {
4455 in2_a2(s, f, o);
4456 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4457 }
4458 #define SPEC_in2_m2_8u 0
4459
4460 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4461 {
4462 in2_a2(s, f, o);
4463 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4464 }
4465 #define SPEC_in2_m2_16s 0
4466
4467 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4468 {
4469 in2_a2(s, f, o);
4470 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4471 }
4472 #define SPEC_in2_m2_16u 0
4473
4474 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4475 {
4476 in2_a2(s, f, o);
4477 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4478 }
4479 #define SPEC_in2_m2_32s 0
4480
4481 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4482 {
4483 in2_a2(s, f, o);
4484 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4485 }
4486 #define SPEC_in2_m2_32u 0
4487
4488 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4489 {
4490 in2_a2(s, f, o);
4491 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4492 }
4493 #define SPEC_in2_m2_64 0
4494
4495 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4496 {
4497 in2_ri2(s, f, o);
4498 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4499 }
4500 #define SPEC_in2_mri2_16u 0
4501
4502 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4503 {
4504 in2_ri2(s, f, o);
4505 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4506 }
4507 #define SPEC_in2_mri2_32s 0
4508
4509 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4510 {
4511 in2_ri2(s, f, o);
4512 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4513 }
4514 #define SPEC_in2_mri2_32u 0
4515
4516 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4517 {
4518 in2_ri2(s, f, o);
4519 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4520 }
4521 #define SPEC_in2_mri2_64 0
4522
4523 static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4524 {
4525 /* XXX should reserve the address */
4526 in1_la2(s, f, o);
4527 o->in2 = tcg_temp_new_i64();
4528 tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
4529 }
4530 #define SPEC_in2_m2_32s_atomic 0
4531
4532 static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4533 {
4534 /* XXX should reserve the address */
4535 in1_la2(s, f, o);
4536 o->in2 = tcg_temp_new_i64();
4537 tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
4538 }
4539 #define SPEC_in2_m2_64_atomic 0
4540
4541 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4542 {
4543 o->in2 = tcg_const_i64(get_field(f, i2));
4544 }
4545 #define SPEC_in2_i2 0
4546
4547 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4548 {
4549 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4550 }
4551 #define SPEC_in2_i2_8u 0
4552
/* in2: the i2 immediate, zero-extended from 16 bits.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0
4558
/* in2: the i2 immediate, zero-extended from 32 bits.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0
4564
4565 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4566 {
4567 uint64_t i2 = (uint16_t)get_field(f, i2);
4568 o->in2 = tcg_const_i64(i2 << s->insn->data);
4569 }
4570 #define SPEC_in2_i2_16u_shl 0
4571
4572 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4573 {
4574 uint64_t i2 = (uint32_t)get_field(f, i2);
4575 o->in2 = tcg_const_i64(i2 << s->insn->data);
4576 }
4577 #define SPEC_in2_i2_32u_shl 0
4578
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table. */

/* C() is D() with the extra data operand defaulted to 0.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: build an enum of insn_<NAME>
   constants used to index insn_info[] below.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: build the DisasInsn descriptor for each insn.
   The SPEC_* macros OR together the specification-exception checks
   that translate_one enforces before running the insn.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
4627
/* Third expansion of insn-data.def: each table entry becomes a switch
   case mapping the combined (op << 8 | op2) opcode to its descriptor.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a combined 16-bit opcode to its DisasInsn descriptor, or NULL
   if the opcode is unknown/unimplemented.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4643
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
4647
4648 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4649 {
4650 uint32_t r, m;
4651
4652 if (f->size == 0) {
4653 return;
4654 }
4655
4656 /* Zero extract the field from the insn. */
4657 r = (insn << f->beg) >> (64 - f->size);
4658
4659 /* Sign-extend, or un-swap the field as necessary. */
4660 switch (f->type) {
4661 case 0: /* unsigned */
4662 break;
4663 case 1: /* signed */
4664 assert(f->size <= 32);
4665 m = 1u << (f->size - 1);
4666 r = (r ^ m) - m;
4667 break;
4668 case 2: /* dl+dh split, signed 20 bit. */
4669 r = ((int8_t)r << 12) | (r >> 8);
4670 break;
4671 default:
4672 abort();
4673 }
4674
4675 /* Validate that the "compressed" encoding we selected above is valid.
4676 I.e. we havn't make two different original fields overlap. */
4677 assert(((o->presentC >> f->indexC) & 1) == 0);
4678 o->presentC |= 1 << f->indexC;
4679 o->presentO |= 1 << f->indexO;
4680
4681 o->c[f->indexC] = r;
4682 }
4683
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.
   Also sets s->next_pc from the decoded instruction length.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes are enough to determine the insn length:
       the primary opcode is in bits 0-7.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Re-read the full insn and left-align it in the 64-bit word,
       as extract_field expects.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the byte at bits 8-15.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the nibble at bits 12-15.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* SS-format insns have no secondary opcode.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4772
/* Translate the single instruction at s->pc: decode it, enforce
   specification-exception constraints, run the table-driven helper
   pipeline (in1/in2/prep/op/wout/cout), and free the temporaries.
   Advances s->pc and returns the translator exit status.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Register-pair operands must designate an even register.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* 128-bit FP register pairs: valid pair starts are 0,1,4,5,8,9,12,13,
           so anything above 13 is invalid.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each helper is optional; the fixed
       order (inputs, prep, op, writeback, cc) is the pipeline contract.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       values that alias global TCG registers and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4882
/* Translate a basic block starting at tb->pc into TCG ops, stopping at
   a control-flow change, a page boundary, the op-buffer/insn limits, or
   a breakpoint.  With SEARCH_PC, additionally record per-op pc/cc_op
   metadata so restore_state_to_opc can map host PCs back to guest state.  */
static inline void gen_intermediate_code_internal(S390CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUS390XState *env = &cpu->env;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start(tb);

    do {
        if (search_pc) {
            /* Record pc/cc_op/icount for the first op of this insn;
               pad any skipped op slots with instr_start = 0.  */
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        /* For icount, the last insn of an I/O-ending TB is bracketed
           by gen_io_start/gen_io_end.  */
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        status = NO_EXIT;
        /* Stop before a breakpointed insn and hand control to the
           debug machinery via do_debug below.  */
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* These already emitted their own TB exit.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    if (search_pc) {
        /* Pad out the remaining op slots for the pc-search metadata.  */
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
5015
/* Public entry point: translate the TB without pc-search metadata.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}
5020
/* Public entry point: translate the TB while recording per-op pc/cc_op
   metadata, used when restoring guest state from a host PC.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}
5025
5026 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
5027 {
5028 int cc_op;
5029 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
5030 cc_op = gen_opc_cc_op[pc_pos];
5031 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5032 env->cc_op = cc_op;
5033 }
5034 }