]> git.proxmox.com Git - mirror_qemu.git/blob - target-s390x/translate.c
block/parallels: create bat_entry_off helper
[mirror_qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
37
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
40
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44
45 #include "trace-tcg.h"
46
47
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
52
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t pc, next_pc;
58 enum cc_op cc_op;
59 bool singlestep_enabled;
60 };
61
62 /* Information carried about a condition to be evaluated. */
63 typedef struct {
64 TCGCond cond:8;
65 bool is_64;
66 bool g1;
67 bool g2;
68 union {
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
71 } u;
72 } DisasCompare;
73
74 #define DISAS_EXCP 4
75
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
79 #endif
80
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
82 {
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
86 }
87 }
88 return pc;
89 }
90
/* Dump the architected CPU state (PSW, cc, general/floating-point
   registers and, for system emulation, control registers) to stream F,
   for monitor-style register display. */
void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int i;

    if (env->cc_op > 3) {
        /* cc not yet folded to 0..3: print the symbolic cc-op name. */
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    /* General registers, four per line. */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    /* Floating point registers, four per line. */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

#ifndef CONFIG_USER_ONLY
    /* Control registers exist only in system emulation. */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    /* Per-cc-op counts of branches not inlined vs. inlined. */
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}
144
/* TCG globals mirroring the guest PSW. */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* TCG globals backing the lazily-evaluated condition-code state. */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Register name storage: entries 0..15 are "r0".."r15", 16..31 "f0".."f15". */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* cc_op recorded per generated opcode, for restoring state mid-TB. */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
158
/* Allocate the TCG globals that mirror guest CPU state: the env pointer,
   the PSW, the cc tracking fields, and the 16 general plus 16 floating
   point registers.  Called once at startup. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* General registers use the first half of cpu_reg_names... */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* ...floating point registers the second half. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }
}
194
195 static TCGv_i64 load_reg(int reg)
196 {
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
199 return r;
200 }
201
202 static TCGv_i64 load_freg32_i64(int reg)
203 {
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
206 return r;
207 }
208
/* Store the 64-bit value V into GPR REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
213
/* Store the 64-bit value V into FPR REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
218
/* Store the low 32 bits of V into GPR REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
224
/* Store the low 32 bits of V into the HIGH half of GPR REG,
   leaving the low half intact. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
229
/* Store the low 32 bits of V into the high half of FPR REG, where
   the architecture keeps the 32-bit float value. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
234
/* Fetch the low 64 bits of a 128-bit helper result from env->retxl. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
239
/* Write the current translation pc back to the architected PSW address. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
245
/* Materialize the tracked cc op into the cc_op global.  Skipped for
   DYNAMIC (already there) and STATIC (cc value already computed). */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
252
/* Sync psw.addr and cc_op before an operation that may fault, so the
   exception path observes consistent architectural state. */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
258
259 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
260 {
261 return (uint64_t)cpu_lduw_code(env, pc);
262 }
263
264 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
265 {
266 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
267 }
268
/* Map the PSW address-space-control bits recorded in the TB flags to
   the TCG mmu index: 0 primary, 1 secondary, 2 home space. */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Any other ASC encoding is a translator bug. */
        tcg_abort();
        break;
    }
}
283
/* Emit a call to the exception helper raising exception number EXCP. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
290
/* Queue program exception CODE: record the code and the instruction
   length in env, advance the PSW past the current insn, flush the cc
   state, and raise EXCP_PGM. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exeption this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Store the length in bytes of the faulting instruction. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
314
315 static inline void gen_illegal_opcode(DisasContext *s)
316 {
317 gen_program_exception(s, PGM_SPECIFICATION);
318 }
319
#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception when the PSW problem-state
   bit (as copied into the TB flags) is set. */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
328
/* Compute the effective address base(b2) + index(x2) + displacement(d2)
   into a new temporary.  Register number 0 means "no base"/"no index".
   When not in 64-bit mode the result is truncated to 31 bits. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            /* Constant-only address: mask at translate time and skip
               the runtime AND below. */
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
358
359 static inline bool live_cc_data(DisasContext *s)
360 {
361 return (s->cc_op != CC_OP_DYNAMIC
362 && s->cc_op != CC_OP_STATIC
363 && s->cc_op > 3);
364 }
365
/* Set the cc to the constant VAL (0..3), discarding any live cc data. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
375
/* Track a one-operand cc computation OP: only cc_dst is live after. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
385
/* Track a two-operand cc computation OP: cc_src and cc_dst are live. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
396
/* Track a three-operand cc computation OP: src, dst and result (vr). */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
405
/* Set cc from a nonzero test of the 64-bit value VAL. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
410
/* Set cc from a 32-bit float result VAL (CC_OP_NZ_F32 classification). */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
415
/* Set cc from a 64-bit float result VAL (CC_OP_NZ_F64 classification). */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
420
/* Set cc from a 128-bit float result in the pair VH:VL. */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
425
/* CC value is in env->cc_op */
/* Mark the cc as already computed (by a helper) into cc_op, and
   discard any now-dead helper inputs. */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
436
/* calculates cc into cc_op */
/* Fold the lazily-tracked condition-code state into the cc_op global
   (value 0..3), calling the calc_cc helper with however many of
   cc_src/cc_dst/cc_vr the current cc op consumes, then mark the cc
   as static. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First pass: allocate the temporaries the helper call needs.
       Non-3-argument ops get a zero dummy to fill unused slots. */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed (constant or already computed). */
        break;
    }

    /* Second pass: produce the cc value in cc_op. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
544
/* Whether a chained goto_tb to DEST is permitted: DEST must lie on one
   of the guest pages this TB touches, and neither single-stepping nor
   a last-I/O TB may be in effect. */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}
553
/* Count a branch that could NOT be inlined for CC_OP (stats build only). */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
560
/* Count a branch that WAS inlined for CC_OP (stats build only). */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
567
568 /* Table of mask values to comparison codes, given a comparison as input.
569 For such, CC=3 should not be possible. */
570 static const TCGCond ltgt_cond[16] = {
571 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
572 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
573 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
574 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
575 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
576 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
577 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
578 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
579 };
580
581 /* Table of mask values to comparison codes, given a logic op as input.
582 For such, only CC=0 and CC=1 should be possible. */
583 static const TCGCond nz_cond[16] = {
584 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
585 TCG_COND_NEVER, TCG_COND_NEVER,
586 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
587 TCG_COND_NE, TCG_COND_NE,
588 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
589 TCG_COND_EQ, TCG_COND_EQ,
590 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
591 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
592 };
593
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* All-ones / all-zeroes mask: branch always / never; no operands
       are needed (point at the cc_op global so nothing is freed). */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table as above, but with the unsigned condition codes. */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (selected bits of dst under the src mask) against 0. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value itself is in cc_op; pick a direct comparison on
           it for the common masks, falling back to (8 >> cc) & mask. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
884
885 static void free_compare(DisasCompare *c)
886 {
887 if (!c->g1) {
888 if (c->is_64) {
889 tcg_temp_free_i64(c->u.s64.a);
890 } else {
891 tcg_temp_free_i32(c->u.s32.a);
892 }
893 }
894 if (!c->g2) {
895 if (c->is_64) {
896 tcg_temp_free_i64(c->u.s64.b);
897 } else {
898 tcg_temp_free_i32(c->u.s32.b);
899 }
900 }
901 }
902
903 /* ====================================================================== */
904 /* Define the insn format enumeration. */
905 #define F0(N) FMT_##N,
906 #define F1(N, X1) F0(N)
907 #define F2(N, X1, X2) F0(N)
908 #define F3(N, X1, X2, X3) F0(N)
909 #define F4(N, X1, X2, X3, X4) F0(N)
910 #define F5(N, X1, X2, X3, X4, X5) F0(N)
911
912 typedef enum {
913 #include "insn-format.def"
914 } DisasFormat;
915
916 #undef F0
917 #undef F1
918 #undef F2
919 #undef F3
920 #undef F4
921 #undef F5
922
923 /* Define a structure to hold the decoded fields. We'll store each inside
924 an array indexed by an enum. In order to conserve memory, we'll arrange
925 for fields that do not exist at the same time to overlap, thus the "C"
926 for compact. For checking purposes there is an "O" for original index
927 as well that will be applied to availability bitmaps. */
928
929 enum DisasFieldIndexO {
930 FLD_O_r1,
931 FLD_O_r2,
932 FLD_O_r3,
933 FLD_O_m1,
934 FLD_O_m3,
935 FLD_O_m4,
936 FLD_O_b1,
937 FLD_O_b2,
938 FLD_O_b4,
939 FLD_O_d1,
940 FLD_O_d2,
941 FLD_O_d4,
942 FLD_O_x2,
943 FLD_O_l1,
944 FLD_O_l2,
945 FLD_O_i1,
946 FLD_O_i2,
947 FLD_O_i3,
948 FLD_O_i4,
949 FLD_O_i5
950 };
951
952 enum DisasFieldIndexC {
953 FLD_C_r1 = 0,
954 FLD_C_m1 = 0,
955 FLD_C_b1 = 0,
956 FLD_C_i1 = 0,
957
958 FLD_C_r2 = 1,
959 FLD_C_b2 = 1,
960 FLD_C_i2 = 1,
961
962 FLD_C_r3 = 2,
963 FLD_C_m3 = 2,
964 FLD_C_i3 = 2,
965
966 FLD_C_m4 = 3,
967 FLD_C_b4 = 3,
968 FLD_C_i4 = 3,
969 FLD_C_l1 = 3,
970
971 FLD_C_i5 = 4,
972 FLD_C_d1 = 4,
973
974 FLD_C_d2 = 5,
975
976 FLD_C_d4 = 6,
977 FLD_C_x2 = 6,
978 FLD_C_l2 = 6,
979
980 NUM_C_FIELD = 7
981 };
982
983 struct DisasFields {
984 unsigned op:8;
985 unsigned op2:8;
986 unsigned presentC:16;
987 unsigned int presentO;
988 int c[NUM_C_FIELD];
989 };
990
991 /* This is the way fields are to be accessed out of DisasFields. */
992 #define have_field(S, F) have_field1((S), FLD_O_##F)
993 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
994
995 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
996 {
997 return (f->presentO >> c) & 1;
998 }
999
/* Fetch decoded field O from its compact storage slot C; the field
   must be present in the current instruction format. */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1006
1007 /* Describe the layout of each field in each format. */
1008 typedef struct DisasField {
1009 unsigned int beg:8;
1010 unsigned int size:8;
1011 unsigned int type:2;
1012 unsigned int indexC:6;
1013 enum DisasFieldIndexO indexO:8;
1014 } DisasField;
1015
1016 typedef struct DisasFormatInfo {
1017 DisasField op[NUM_C_FIELD];
1018 } DisasFormatInfo;
1019
1020 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1021 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1022 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1023 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1024 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1025 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1026 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1027 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1028 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1029 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1030 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1031 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1032 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1033 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1034
1035 #define F0(N) { { } },
1036 #define F1(N, X1) { { X1 } },
1037 #define F2(N, X1, X2) { { X1, X2 } },
1038 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1039 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1040 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1041
1042 static const DisasFormatInfo format_info[] = {
1043 #include "insn-format.def"
1044 };
1045
1046 #undef F0
1047 #undef F1
1048 #undef F2
1049 #undef F3
1050 #undef F4
1051 #undef F5
1052 #undef R
1053 #undef M
1054 #undef BD
1055 #undef BXD
1056 #undef BDL
1057 #undef BXDL
1058 #undef I
1059 #undef L
1060
1061 /* Generally, we'll extract operands into this structures, operate upon
1062 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1063 of routines below for more details. */
1064 typedef struct {
1065 bool g_out, g_out2, g_in1, g_in2;
1066 TCGv_i64 out, out2, in1, in2;
1067 TCGv_i64 addr1;
1068 } DisasOps;
1069
1070 /* Instructions can place constraints on their operands, raising specification
1071 exceptions if they are violated. To make this easy to automate, each "in1",
1072 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1073 of the following, or 0. To make this easy to document, we'll put the
1074 SPEC_<name> defines next to <name>. */
1075
1076 #define SPEC_r1_even 1
1077 #define SPEC_r2_even 2
1078 #define SPEC_r3_even 4
1079 #define SPEC_r1_f128 8
1080 #define SPEC_r2_f128 16
1081
1082 /* Return values from translate_one, indicating the state of the TB. */
1083 typedef enum {
1084 /* Continue the TB. */
1085 NO_EXIT,
1086 /* We have emitted one or more goto_tb. No fixup required. */
1087 EXIT_GOTO_TB,
1088 /* We are not using a goto_tb (for whatever reason), but have updated
1089 the PC (for whatever reason), so there's no need to do it again on
1090 exiting the TB. */
1091 EXIT_PC_UPDATED,
1092 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1093 updated the PC for the next instruction to be executed. */
1094 EXIT_PC_STALE,
1095 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1096 No following code will be executed. */
1097 EXIT_NORETURN,
1098 } ExitStatus;
1099
1100 typedef enum DisasFacility {
1101 FAC_Z, /* zarch (default) */
1102 FAC_CASS, /* compare and swap and store */
1103 FAC_CASS2, /* compare and swap and store 2*/
1104 FAC_DFP, /* decimal floating point */
1105 FAC_DFPR, /* decimal floating point rounding */
1106 FAC_DO, /* distinct operands */
1107 FAC_EE, /* execute extensions */
1108 FAC_EI, /* extended immediate */
1109 FAC_FPE, /* floating point extension */
1110 FAC_FPSSH, /* floating point support sign handling */
1111 FAC_FPRGR, /* FPR-GR transfer */
1112 FAC_GIE, /* general instructions extension */
1113 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1114 FAC_HW, /* high-word */
1115 FAC_IEEEE_SIM, /* IEEE exception sumilation */
1116 FAC_LOC, /* load/store on condition */
1117 FAC_LD, /* long displacement */
1118 FAC_PC, /* population count */
1119 FAC_SCF, /* store clock fast */
1120 FAC_SFLE, /* store facility list extended */
1121 FAC_ILA, /* interlocked access facility 1 */
1122 } DisasFacility;
1123
/* Static decode-table entry for one instruction: opcode, format,
   required facility, operand-constraint spec bits, and the callbacks
   that assemble the translation from reusable pieces. */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    DisasFacility fac:8;
    unsigned spec:8;

    const char *name;

    /* Operand loaders, output preparation/writeback, cc update, and
       the main operation, each optional. */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
1141
1142 /* ====================================================================== */
1143 /* Miscellaneous helpers, used by several operations. */
1144
/* Load the shift count for a shift instruction into o->in2: the low
   bits (selected by MASK) of d2, or of address b2+d2 when a base
   register is given. */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
1158
/* Unconditional direct branch to DEST.  Falls through when DEST is the
   next insn; chains via goto_tb when permitted; otherwise just sets
   the PSW address and exits to the main loop. */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1175
/* Emit a conditional branch.  C describes the comparison to evaluate;
   if IS_IMM the destination is s->pc + 2*IMM, otherwise it is the
   run-time value CDEST (which may be unused, e.g. for bcr %r0).
   Frees C before returning.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            /* Unconditional immediate branch: defer to the direct helper. */
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            /* Unconditional register branch. */
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so movcond can select.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1300
1301 /* ====================================================================== */
1302 /* The operations. These perform the bulk of the work for any insn,
1303 usually after the operands have been loaded and output initialized. */
1304
/* Absolute value of a 64-bit integer, via helper.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Absolute value of a short BFP value: clear sign bit 31.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Absolute value of a long BFP value: clear the sign bit.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* Absolute value of an extended BFP value: clear the sign bit in the
   high doubleword, pass the low doubleword through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* 64-bit addition: out = in1 + in2.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1335
/* Add with carry: out = in1 + in2 + carry, where the carry is
   recovered from the current condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the setcond result to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    /* Fold the carry into the sum.  */
    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
1362
/* BFP short add, via helper (handles rounding/exceptions in C).  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP long add, via helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP extended add: 128-bit operands passed as high/low pairs, low
   half of the result retrieved from the helper's retxl slot.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* 64-bit bitwise AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1387
/* AND immediate into a sub-field of the register.  insn->data encodes
   the field position (low byte: shift) and width (high bits: size);
   bits outside the field are preserved by OR-ing ~mask into in2.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1404
1405 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1406 {
1407 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1408 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1409 tcg_gen_mov_i64(psw_addr, o->in2);
1410 return EXIT_PC_UPDATED;
1411 } else {
1412 return NO_EXIT;
1413 }
1414 }
1415
/* Branch relative and save: store the link info, then branch to the
   halfword-scaled immediate displacement.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* Branch on condition: evaluate the CC against mask M1 and branch to
   either the immediate displacement or the address in in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1432
/* Branch on count, 32-bit: decrement the low half of R1 and branch
   while the 32-bit result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement, write back only the low 32 bits of R1.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch on count, 64-bit: decrement R1 in place and branch while the
   result is non-zero.  R1 is a global, hence c.g1 = true.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1475
/* Branch on index, 32-bit: add R3 to R1 and compare the 32-bit sum
   against the comparand in R3|1.  insn->data selects LE vs GT
   (branch-on-index-low-or-equal vs branch-on-index-high).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    /* The comparand lives in the odd register of the R3 pair.  */
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch on index, 64-bit variant of the above.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If R1 is the comparand register itself, the addition below would
       clobber it; snapshot the old value in that case.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1527
/* Compare and branch: compare in1 with in2 using the relation encoded
   in M3; insn->data selects the unsigned flavour.  The target is the
   I4 immediate or the B4/D4 address.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    /* The operands are globals managed by the caller.  */
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Indirect form: compute the branch address into o->out.  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1553
/* BFP short compare: helper computes the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* BFP long compare.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* BFP extended compare; operands are 128-bit high/low pairs.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Convert BFP short to 32-bit fixed; M3 is the rounding mode.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert BFP long to 32-bit fixed.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert BFP extended to 32-bit fixed.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1601
/* Convert BFP short to 64-bit fixed; M3 is the rounding mode.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert BFP long to 64-bit fixed.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert BFP extended to 64-bit fixed.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert BFP short to 32-bit logical (unsigned).  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert BFP long to 32-bit logical.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert BFP extended to 32-bit logical.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert BFP short to 64-bit logical.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert BFP long to 64-bit logical.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert BFP extended to 64-bit logical.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1682
/* Convert 64-bit fixed to BFP short; M3 is the rounding mode.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit fixed to BFP long.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit fixed to BFP extended (128-bit result).  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Convert 64-bit logical (unsigned) to BFP short.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit logical to BFP long.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit logical to BFP extended (128-bit result).  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1732
/* CHECKSUM: helper computes the checksum and the number of bytes
   consumed; the R2 pair is then advanced/shortened by that length.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the source address and reduce the remaining length.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1749
/* COMPARE LOGICAL (character): for power-of-two lengths up to 8 do the
   compare inline with two loads; otherwise fall back to the helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    /* L is the length minus one, hence the +1.  */
    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: byte-wise comparison in the helper.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1783
/* COMPARE LOGICAL LONG EXTENDED, entirely in the helper.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE LOGICAL CHARACTERS UNDER MASK: compare the bytes of in1
   selected by M3 against storage at in2, via helper.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* COMPARE LOGICAL STRING: helper returns updated addresses (second
   one through the retxl slot).  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1817
/* COPY SIGN: out = magnitude of in2 with the sign bit of in1.
   The sign of in1 must be extracted first in case out aliases in1.  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
1827
/* COMPARE AND SWAP (32- and 64-bit, selected by insn->data).
   Non-atomic implementation: load, compare, conditionally store.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1877
/* COMPARE DOUBLE AND SWAP (128-bit): compare the R1 pair with the
   doubleword pair in storage and, if equal, store the R3 pair.
   Non-atomic implementation.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* Select new values on equality, re-store old values otherwise.  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions.  */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1929
1930 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged), via helper.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
1940 #endif
1941
/* CONVERT TO DECIMAL: helper produces the packed-decimal form of the
   low 32 bits of in1, which is then stored at the address in in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1953
/* COMPARE AND TRAP: if the M3 relation between in1 and in2 holds,
   set the DXC and raise a data exception; otherwise fall through.
   The branch uses the inverted condition to skip the trap.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap. */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1980
1981 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): dispatch on the function code to the helper,
   passing R1/R2 and receiving the result in R2.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
1995 #endif
1996
/* 32-bit signed divide: helper yields quotient and remainder; the
   second result comes back through the retxl slot.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 32-bit unsigned divide.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 64-bit signed divide.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 128/64-bit unsigned divide: the dividend is the out/out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2024
/* BFP short divide, via helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP long divide.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP extended divide; 128-bit result comes back via retxl.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* EXTRACT ACCESS register R2 into the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
2050
/* EXTRACT CACHE ATTRIBUTE: we report no cache topology, so always -1.  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}

/* EXTRACT FPC: read the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2063
/* EXTRACT PSW: store the high half of the PSW mask into R1 and, if
   R2 is non-zero, the low half into R2.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
2081
/* EXECUTE: run the target instruction with its second byte modified,
   currently implemented entirely in the helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    /* Make PC and cc_op valid before the helper runs the target insn.  */
    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2107
/* FIND LEFTMOST ONE: R1 = number of leading zeros (64 if input is 0),
   R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2127
2128 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2129 {
2130 int m3 = get_field(s->fields, m3);
2131 int pos, len, base = s->insn->data;
2132 TCGv_i64 tmp = tcg_temp_new_i64();
2133 uint64_t ccm;
2134
2135 switch (m3) {
2136 case 0xf:
2137 /* Effectively a 32-bit load. */
2138 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2139 len = 32;
2140 goto one_insert;
2141
2142 case 0xc:
2143 case 0x6:
2144 case 0x3:
2145 /* Effectively a 16-bit load. */
2146 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2147 len = 16;
2148 goto one_insert;
2149
2150 case 0x8:
2151 case 0x4:
2152 case 0x2:
2153 case 0x1:
2154 /* Effectively an 8-bit load. */
2155 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2156 len = 8;
2157 goto one_insert;
2158
2159 one_insert:
2160 pos = base + ctz32(m3) * 8;
2161 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2162 ccm = ((1ull << len) - 1) << pos;
2163 break;
2164
2165 default:
2166 /* This is going to be a sequence of loads and inserts. */
2167 pos = base + 32 - 8;
2168 ccm = 0;
2169 while (m3) {
2170 if (m3 & 0x8) {
2171 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2172 tcg_gen_addi_i64(o->in2, o->in2, 1);
2173 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2174 ccm |= 0xff << pos;
2175 }
2176 m3 = (m3 << 1) & 0xf;
2177 pos -= 8;
2178 }
2179 break;
2180 }
2181
2182 tcg_gen_movi_i64(tmp, ccm);
2183 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2184 tcg_temp_free_i64(tmp);
2185 return NO_EXIT;
2186 }
2187
/* Insert immediate: deposit in2 into in1 at the field position/width
   encoded in insn->data (low byte: shift, high bits: size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}

/* INSERT PROGRAM MASK: merge the current CC and the PSW program mask
   into bits 24-31 of the output register, preserving the rest.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program mask bits from the PSW mask.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Merge the condition code into bits 28-29 of the low word.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2214
2215 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged), via helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED (privileged), via helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2229 #endif
2230
/* Lengthen BFP short to long, via helper.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Round BFP long to short.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Round BFP extended (in1/in2 pair) to long.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Round BFP extended to short.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Lengthen BFP long to extended; low half returned via retxl.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Lengthen BFP short to extended.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Load 31-bit: keep only the low 31 bits of in2.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2274
/* Memory loads of 1/2/4/8 bytes, signed and unsigned, from the
   address in in2.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2316
/* LOAD ON CONDITION: out = (m3 condition holds) ? in2 : in1.
   in1 carries the old register value so a false condition is a no-op. */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        /* Condition computed in 64 bits: select directly. */
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit condition: materialize it as a 0/1 value, widen to
           64 bits, then select against zero. */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2346
2347 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; load control registers r1..r3
   from memory at in2.  May fault, hence potential_page_fault. */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit, LCTLG): as op_lctl but 8-byte entries. */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS: privileged; translate the virtual address in in2
   and set the condition code from the translation result. */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2379
/* LOAD PSW (short, 8-byte ESA-format PSW): privileged; never returns
   to the current instruction stream, so the TB ends. */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* Word 0 = mask, word 1 = address; in2 is updated in place. */
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED (16-byte z/Architecture PSW): privileged. */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2415 #endif
2416
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory. */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2427
/* LOAD MULTIPLE (32-bit): unrolled at translate time — one load per
   register from r1 through r3 (wrapping mod 16), 4 bytes apart. */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);          /* writes the low 32 bits */
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE HIGH: like op_lm32 but targets the high 32 bits of
   each 64-bit register. */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE (64-bit, LMG): full-register loads, 8 bytes apart. */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2490
2491 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit): privileged; bypasses DAT. */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD USING REAL ADDRESS (64-bit, LURAG): privileged. */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2507 #endif
2508
/* Generic register move: steal in2 as the output so the generic
   output writeback stores it; clearing in2 prevents a double free. */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;        /* inherit "is a global" ownership flag */
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* 128-bit move: steal the in1/in2 pair as the out/out2 pair. */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2529
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via helper. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: helper consumes the r1/r2 register pairs and returns
   the condition code through cc_op. */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: r1/r3 pairs plus padding byte from in2. */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2562
2563 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-address-space move; the length
   comes from the register selected by the l1 field. */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: mirror image of op_mvcp. */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2583 #endif
2584
/* MOVE PAGE: r0 carries the operand specification bits.
   NOTE(review): set_cc_static expects the cc_op global to hold the
   resulting condition code, but unlike mvcl/mvcle the helper is not
   given cc_op to write — verify the helper updates env->cc_op itself,
   otherwise the CC after MVPG is stale. */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: r0 holds the terminator byte; the helper returns the
   updated r1 address in its result and r2 via the low-128 mechanism. */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2601
/* MULTIPLY (integer): low 64 bits of in1 * in2. */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY LOGICAL (128-bit result): high half in out, low in out2. */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY, 32-bit BFP. */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY, 32-bit BFP operands with 64-bit BFP result (MDEB). */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY, 64-bit BFP. */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY, 128-bit BFP: (out:out2) *= (in1:in2). */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY, 64-bit BFP operands with 128-bit BFP result (MXDB). */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY AND ADD, 32-bit BFP: out = in1 * in2 + f(r3). */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD, 64-bit BFP: out = in1 * in2 + f(r3). */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT, 32-bit BFP. */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT, 64-bit BFP. */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2675
/* LOAD NEGATIVE (integer): out = -|in2|. */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD NEGATIVE, 32-bit BFP: force the sign bit on. */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE, 64-bit BFP: force the sign bit on. */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE, 128-bit BFP: sign lives in the high half (in1). */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2700
/* AND (character): byte-wise AND of l1+1 bytes at addr1 with in2;
   CC is set from whether the result is nonzero. */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2710
/* LOAD COMPLEMENT (integer): out = -in2. */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT, 32-bit BFP: flip the sign bit. */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT, 64-bit BFP: flip the sign bit. */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT, 128-bit BFP: sign lives in the high half (in1). */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2735
/* OR (character): byte-wise OR of l1+1 bytes at addr1 with in2. */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* OR (register/memory forms). */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR IMMEDIATE into one field of the register: insn->data encodes the
   field as (size << 8) | shift, e.g. OIHH/OILL variants. */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* POPULATION COUNT: per-byte popcount, as defined by the insn. */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
2773
2774 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; helper flushes the translation cache. */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2781 #endif
2782
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate R2
   left by i5, then insert bit range i3..i4 into R1, optionally zeroing
   the bits outside the range (i4 bit 0x80). */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  PMASK restricts the
       operation to the half of the register the variant works on. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask/merge the two sources. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
2865
2866 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
2867 {
2868 int i3 = get_field(s->fields, i3);
2869 int i4 = get_field(s->fields, i4);
2870 int i5 = get_field(s->fields, i5);
2871 uint64_t mask;
2872
2873 /* If this is a test-only form, arrange to discard the result. */
2874 if (i3 & 0x80) {
2875 o->out = tcg_temp_new_i64();
2876 o->g_out = false;
2877 }
2878
2879 i3 &= 63;
2880 i4 &= 63;
2881 i5 &= 63;
2882
2883 /* MASK is the set of bits to be operated on from R2.
2884 Take care for I3/I4 wraparound. */
2885 mask = ~0ull >> i3;
2886 if (i3 <= i4) {
2887 mask ^= ~0ull >> i4 >> 1;
2888 } else {
2889 mask |= ~(~0ull >> i4 >> 1);
2890 }
2891
2892 /* Rotate the input as necessary. */
2893 tcg_gen_rotli_i64(o->in2, o->in2, i5);
2894
2895 /* Operate. */
2896 switch (s->fields->op2) {
2897 case 0x55: /* AND */
2898 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2899 tcg_gen_and_i64(o->out, o->out, o->in2);
2900 break;
2901 case 0x56: /* OR */
2902 tcg_gen_andi_i64(o->in2, o->in2, mask);
2903 tcg_gen_or_i64(o->out, o->out, o->in2);
2904 break;
2905 case 0x57: /* XOR */
2906 tcg_gen_andi_i64(o->in2, o->in2, mask);
2907 tcg_gen_xor_i64(o->out, o->out, o->in2);
2908 break;
2909 default:
2910 abort();
2911 }
2912
2913 /* Set the CC. */
2914 tcg_gen_andi_i64(cc_dst, o->out, mask);
2915 set_cc_nz_u64(s, cc_dst);
2916 return NO_EXIT;
2917 }
2918
/* Byte-reverse the low 16 bits of in2 (LRVH family). */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse the low 32 bits of in2 (LRV). */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse all 64 bits of in2 (LRVG). */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (32-bit, RLL): rotate in the low word
   and zero-extend the result. */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (64-bit, RLLG). */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2957
2958 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; CC reports the previous
   reference/change state of the storage key. */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL FAST: privileged. */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
2974 #endif
2975
/* SET ADDRESSING MODE (SAM24/SAM31/SAM64): insn->data selects the
   mode; update the PSW extended/basic addressing bits accordingly. */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    /* MASK is the largest address representable in the target mode. */
    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end. */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    /* Write the new mode into PSW mask bits 31-32 (EA/BA). */
    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}
3010
/* SET ACCESS REGISTER: store the low 32 bits of in2 into AR r1. */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
3017
/* SUBTRACT, 32-bit BFP. */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT, 64-bit BFP. */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT, 128-bit BFP: (out:out2) -= (in1:in2). */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT, 32-bit BFP. */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT, 64-bit BFP. */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT, 128-bit BFP. */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3055
3056 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; in1/in2 carry the command word and
   SCCB address, CC from the helper. */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged inter-CPU order; CC from the helper.
   NOTE(review): unlike servc, no set_cc_static follows the helper
   even though cc_op is written — confirm the dispatch path handles it. */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3075 #endif
3076
/* STORE ON CONDITION (STOC/STOCG): store r1 at b2+d2 only when the m3
   condition holds; insn->data selects 64- vs 32-bit store width. */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
3110
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit index
   (31 for the 32-bit form, 63 for the 64-bit form). */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
3124
/* SHIFT LEFT SINGLE LOGICAL. */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic). */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL. */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SET FPC: load the floating-point control register from in2. */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: as SET FPC but may raise a simulated IEEE
   exception per the signaling flags. */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3154
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT): insert the rounding-mode bits
   from the b2+d2 operand into the proper field of the FPC, then
   re-install the FPC so fpu_status picks up the new mode. */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    /* Each variant targets a different FPC bit-field. */
    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        /* No base register: the displacement is the literal value. */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3194
3195 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; bits 24-27 of the operand
   address become the PSW access key. */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged. */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace PSW mask bits 0-7. */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CPU ADDRESS: privileged. */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3227
/* STORE CLOCK: write the TOD clock value; CC 0 = clock set. */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: 16-byte store of the widened clock value. */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3257
/* SET CLOCK COMPARATOR: privileged. */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged. */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit, STCTG): privileged; store CRs r1..r3. */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit, STCTL): privileged. */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3295
/* STORE CPU ID: privileged; compose cpu number (low word) and machine
   type (high word) into the 64-bit result. */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}
3308
/* SET CPU TIMER: privileged. */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST: privileged; writes a fixed facility word to
   the low-core facility-list field at address 200.
   NOTE(review): the store goes through the current memory index, i.e.
   a virtual address, while low core is an absolute location — confirm
   this is the intended simplification. */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}

/* STORE CPU TIMER: privileged. */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
3336
/* STORE SYSTEM INFORMATION: privileged; function code in r0,
   selector in r1, SYSIB address in in2; CC from the helper. */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX: privileged. */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* Channel-subsystem I/O instructions: not implemented; report the
   subchannel as not operational via CC 3. */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational. */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}

/* STORE PREFIX: privileged; mask to the architected prefix bits. */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
3368
/* STORE THEN {AND,OR} SYSTEM MASK (STNSM op 0xac / STOSM): privileged;
   store the current system mask byte, then combine i2 into it. */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into PSW bits 0-7. */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into PSW bits 0-7. */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3392
/* STORE USING REAL ADDRESS (32-bit): privileged; bypasses DAT. */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (64-bit, STURG): privileged. */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3408 #endif
3409
/* Memory stores of various widths: value in in1, address in in2. */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3433
/* STORE ACCESS MULTIPLE: store access registers r1..r3 to memory. */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3444
/* STORE CHARACTERS UNDER MASK: store the bytes of r1 selected by m3.
   insn->data gives the bit offset of the source word within the
   register (for the high/low variants).  Contiguous masks become one
   wide store; sparse masks fall back to byte-at-a-time stores. */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte within the register. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3493
/* STORE MULTIPLE: store registers r1..r3 (wrapping mod 16) at successive
   addresses starting at in2.  insn->data selects the element size: 8 for
   the 64-bit form, otherwise 32-bit stores.  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        /* Stop after storing r3; the address is only advanced between
           stores, so in2 (a temp) never points past the last element.  */
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3517
3518 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3519 {
3520 int r1 = get_field(s->fields, r1);
3521 int r3 = get_field(s->fields, r3);
3522 TCGv_i64 t = tcg_temp_new_i64();
3523 TCGv_i64 t4 = tcg_const_i64(4);
3524 TCGv_i64 t32 = tcg_const_i64(32);
3525
3526 while (1) {
3527 tcg_gen_shl_i64(t, regs[r1], t32);
3528 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3529 if (r1 == r3) {
3530 break;
3531 }
3532 tcg_gen_add_i64(o->in2, o->in2, t4);
3533 r1 = (r1 + 1) & 15;
3534 }
3535
3536 tcg_temp_free_i64(t);
3537 tcg_temp_free_i64(t4);
3538 tcg_temp_free_i64(t32);
3539 return NO_EXIT;
3540 }
3541
/* SEARCH STRING: done entirely in the helper, which returns the updated
   r1 value directly and the updated r2 value in the low 128 return slot;
   CC comes back via cc_op.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3550
/* out = in1 - in2; any CC update is handled by the cout generator.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3556
/* Subtract with borrow: out = in1 - in2 - borrow, where the borrow is
   recovered from the current (possibly dynamic) condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC. Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    /* Materialize the comparison as a 0/1 value in the right width.  */
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
3582
/* SUPERVISOR CALL: record the SVC number and instruction length for the
   exception handler, then raise EXCP_SVC.  Ends the TB.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* PSW address and cc_op must be in sync before taking the trap.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    /* ILEN of this insn, derived from how far the decoder advanced.  */
    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3601
/* TEST DATA CLASS, 32-bit float: the helper sets CC directly.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS, 64-bit float.  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS, 128-bit float.  The 128-bit operand arrives in
   out/out2 (see in1_x1_o), with the class mask in in2.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3622
#ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: helper probes the page at addr1 with key in2 and
   sets CC accordingly.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
3632
/* TRANSLATE: helper replaces each of l1+1 bytes at addr1 using the
   256-byte table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3642
/* UNPACK: helper expands the packed-decimal field at in2 into the zoned
   field at addr1; length l1 selects the field sizes.  No CC change.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3651
/* EXCLUSIVE OR CHARACTER: xor l1+1 bytes at (b2,d2) into (b1,d1).
   When both operands are the same field the result is all zeroes, which
   we expand inline as stores of zero for short lengths; everything else
   goes through the helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l is a length-minus-one field; from here on it is the number
           of bytes remaining.  Emit descending power-of-two stores,
           advancing addr1 only while bytes remain.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* XC of a field with itself always yields CC 0 (result zero).  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
3705
/* out = in1 ^ in2; CC handling is left to the cout generator.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* XOR immediate against one 16/32-bit slice of the register: insn->data
   packs (size << 8) | shift.  Only the masked slice contributes to CC.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must not be a global register.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3727
/* Produce a constant zero result.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a 128-bit zero: both output halves alias the same constant,
   g_out2 set so the temp is not freed twice.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3741
3742 /* ====================================================================== */
3743 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3744 the original inputs), update the various cc data structures in order to
3745 be able to compute the new condition code. */
3746
/* Absolute-value results: CC derives from the output alone.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed adds: CC needs both inputs and the output (overflow detect).  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* Unsigned (logical) adds.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* Add-with-carry variants.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}
3786
/* Signed compares: CC from the two inputs only.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* Unsigned (logical) compares.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* Float results: CC classifies the output value.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

/* 128-bit float: both output halves participate.  */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}
3821
/* Negative-absolute-value results.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* Two's-complement (negation) results.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Zero/non-zero tests.  The 32-bit form masks the output first so stray
   high bits in the 64-bit temp do not affect CC.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Signed comparison of the result against zero.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}
3862
/* Signed subtracts.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* Unsigned (logical) subtracts.  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* Subtract-with-borrow variants.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* TEST UNDER MASK: CC from value and mask.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3902
3903 /* ====================================================================== */
3904 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3905 with the TCG register to which we will write. Used in combination with
3906 the "wout" generators, in some cases we need a new temporary, and in
3907 some cases we can write to a TCG global. */
3908
/* Allocate a fresh temporary for the result.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh temporary pair (128-bit result).  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into the r1 global; g_out stops it being freed.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write into the even/odd register pair r1:r1+1 (r1 must be even).  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into float register f1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write into the 128-bit float register pair f1:f1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3953
3954 /* ====================================================================== */
3955 /* The "Write OUTput" generators. These generally perform some non-trivial
3956 copy of data to TCG globals, or to main memory. The trivial cases are
3957 generally handled by having a "prep" generator install the TCG global
3958 as the destination of the operation. */
3959
/* Store the full 64-bit result into r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the low 32 bits of the result into r1, leaving the high half.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store the two 32-bit halves into the even/odd pair r1:r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split one 64-bit result across the pair: low word to r1+1, high word
   to r1.  Note out is clobbered by the shift.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store a short (32-bit) float result.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a long (64-bit) float result.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
4014
4015 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4016 {
4017 int f1 = get_field(s->fields, r1);
4018 store_freg(f1, o->out);
4019 store_freg(f1 + 2, o->out2);
4020 }
4021 #define SPEC_wout_x1 SPEC_r1_f128
4022
/* Store a 32-bit result into r1 only when r1 != r2 (the r1 == r2 case
   is an in-place operation with nothing to copy).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* As above, for short float registers.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the result to memory at addr1, at each width.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store a 32-bit result to the second-operand address.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Atomic-update writeback: store the new value at addr1 and copy the old
   memory value (left in in2 by the matching in2_*_atomic loader) to r1.  */
static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_32_r1_atomic 0

static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_64_r1_atomic 0
4084
4085 /* ====================================================================== */
4086 /* The "INput 1" generators. These load the first operand to an insn. */
4087
/* Load r1 into a fresh temporary.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Alias the r1 global directly ("_o" = original, no copy); g_in1 stops
   it from being freed.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Sign-extended low 32 bits of r1.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Zero-extended low 32 bits of r1.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the even/odd pair.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Concatenate the pair's low words into one 64-bit value: r1 supplies
   the high half, r1+1 the low half.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even
4149
/* Load r2 into a fresh temporary (used when r2 is the first input).  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

/* Load r3 into a fresh temporary.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

/* Alias the r3 global directly.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* r3:r3+1 low words concatenated (r3 = high half).  */
static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* Short float f1 as a 64-bit temp.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* Alias the long float f1 global.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* 128-bit float x1: note this deliberately fills out/out2, not in1/in2 —
   ops taking x1 read the 128-bit operand from the output slots (e.g.
   op_tcxb).  Presumably also lets x1 double as the destination.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

/* Alias the long float f3 global.  */
static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0
4219
/* Compute the first-operand effective address (b1 + d1) into addr1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Compute the second-operand address (x2 + b2 + d2), but into addr1 —
   used by the atomic forms, which later store back through addr1.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Memory loads of the first operand at each width/signedness; each
   computes addr1 first and leaves it available to the writeback.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4280
4281 /* ====================================================================== */
4282 /* The "INput 2" generators. These load the second operand to an insn. */
4283
/* Alias the r1 global as the second input.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Zero-extended low 16 bits of r1.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Zero-extended low 32 bits of r1.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* r1:r1+1 low words concatenated (r1 = high half).  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even
4312
/* Load r2 into a fresh temporary.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* Alias the r2 global directly.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when r2 != 0; in2 stays NULL otherwise (r2 == 0 means
   "no operand" for these insns).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Sign/zero extensions of the low bits of r2.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

/* Load r3 into a fresh temporary.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0
4382
/* Short float f2 as a 64-bit temp.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Alias the long float f2 global.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit float x2 occupies BOTH input slots: high half in in1, low
   half in in2.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Register-as-address: r2 treated as a base with no index/displacement.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Full effective address x2 + b2 + d2.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: i2 is a signed halfword offset from this insn.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift amounts, masked to 31 or 63 bits by help_l2_shift.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
4435
/* Memory loads of the second operand: compute the address into in2,
   then overwrite in2 (a temp) with the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* As above, but through a PC-relative address (in2_ri2).  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
4505
/* Atomic-update loads: keep the address in addr1 (via in1_la2) so the
   matching wout_*_r1_atomic can store back through it, and load the old
   value into a fresh in2 temp.  */
static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_32s_atomic 0

static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_64_atomic 0

/* Immediate i2, with the decoder's native sign extension.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* Immediate i2, zero-extended from 8/16/32 bits.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* Zero-extended immediate shifted left by insn->data bits.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4561
4562 /* ====================================================================== */
4563
4564 /* Find opc within the table of insns. This is formulated as a switch
4565 statement so that (1) we get compile-time notice of cut-paste errors
4566 for duplicated opcodes, and (2) the compiler generates the binary
4567 search tree, rather than us having to post-process the table. */
4568
/* C() is the common case of D() with no extra per-insn data.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: generate one enumerator per insn,
   giving each a stable index into insn_info[] below.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: generate the DisasInsn descriptor for each insn,
   wiring up its format, facility, spec-exception mask, helper
   callbacks and per-insn data.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: generate one switch case per opcode, mapping the
   16-bit combined opcode to its insn_info[] entry.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4626
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
4630
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field means the operand is unused for this format.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Classic branch-free sign extension: xor with the sign bit,
           then subtract it back out.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* Recombine: dh (high 8 bits, sign-extended) goes above the
           12-bit dl which was extracted into the upper part of r.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4666
4667 /* Lookup the insn at the current PC, extracting the operands into O and
4668 returning the info struct for the insn. Returns NULL for invalid insn. */
4669
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes always contain the primary opcode, from which
       the total instruction length (2, 4 or 6 bytes) is derived.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the uint64_t, as extract_field
       expects big-bit-endian field positions relative to bit 0.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        /* Default: OP2 at bit 40 (byte 5 of a 6-byte insn).  */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4755
/* Translate one instruction at s->pc: decode it, check specification
   exceptions, run the per-insn helper pipeline (in1/in2/prep/op/wout/cout),
   free temporaries, and advance s->pc.  Returns the exit status from the
   op helper, or EXIT_NORETURN for illegal/excepting insns.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions.  The SPEC_* bits were
       accumulated from the operand helpers in the insn table.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Even register pairs required by some insns.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* 128-bit FP register pairs: valid base registers are 0-13.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; the table
       supplies NULL for stages an insn does not use.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       values that alias global TCG registers and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4865
/* Translate a basic block starting at tb->pc into TCG ops.  When
   search_pc is true, also record per-op PC/cc_op/icount bookkeeping so
   that restore_state_to_opc can later map a host PC back to guest
   state.  */
static inline void gen_intermediate_code_internal(S390CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUS390XState *env = &cpu->env;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start(tb);

    do {
        if (search_pc) {
            /* Record the mapping from op index to guest PC/cc state,
               padding any skipped op slots with zero markers.  */
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        status = NO_EXIT;
        /* Stop before translating an insn that has a breakpoint set.  */
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The op helper already emitted the TB exit.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    if (search_pc) {
        /* Pad the remainder of the op-index bookkeeping arrays.  */
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4998
4999 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
5000 {
5001 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
5002 }
5003
5004 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
5005 {
5006 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
5007 }
5008
5009 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
5010 {
5011 int cc_op;
5012 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
5013 cc_op = gen_opc_cc_op[pc_pos];
5014 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5015 env->cc_op = cc_op;
5016 }
5017 }