target-s390x/translate.c (qemu.git, at commit "target-s390: Tidy s->op_cc handling")
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
51 struct DisasContext {
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
54 DisasFields *fields;
55 uint64_t pc, next_pc;
56 enum cc_op cc_op;
57 bool singlestep_enabled;
58 };
59
60 /* Information carried about a condition to be evaluated. */
61 typedef struct {
62 TCGCond cond:8;
63 bool is_64;
64 bool g1;
65 bool g2;
66 union {
67 struct { TCGv_i64 a, b; } s64;
68 struct { TCGv_i32 a, b; } s32;
69 } u;
70 } DisasCompare;
71
72 #define DISAS_EXCP 4
73
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
77 #endif
78
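/* Form the link value stored by the branch-and-link instructions: in 31-bit
   mode the top bit of the 32-bit address doubles as the addressing-mode flag;
   otherwise the pc is returned unchanged. */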
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
80 {
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
84 }
85 }
86 return pc;
87 }
88
89 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
90 int flags)
91 {
92 int i;
93
94 if (env->cc_op > 3) {
95 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
96 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
97 } else {
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
99 env->psw.mask, env->psw.addr, env->cc_op);
100 }
101
102 for (i = 0; i < 16; i++) {
103 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
104 if ((i % 4) == 3) {
105 cpu_fprintf(f, "\n");
106 } else {
107 cpu_fprintf(f, " ");
108 }
109 }
110
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
113 if ((i % 4) == 3) {
114 cpu_fprintf(f, "\n");
115 } else {
116 cpu_fprintf(f, " ");
117 }
118 }
119
120 #ifndef CONFIG_USER_ONLY
121 for (i = 0; i < 16; i++) {
122 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
123 if ((i % 4) == 3) {
124 cpu_fprintf(f, "\n");
125 } else {
126 cpu_fprintf(f, " ");
127 }
128 }
129 #endif
130
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i = 0; i < CC_OP_MAX; i++) {
133 cpu_fprintf(f, " %15s = %10" PRIu64 "\t%10" PRIu64 "\n", cc_name(i),
134 inline_branch_miss[i], inline_branch_hit[i]);
135 }
136 #endif
137
138 cpu_fprintf(f, "\n");
139 }
140
141 static TCGv_i64 psw_addr;
142 static TCGv_i64 psw_mask;
143
144 static TCGv_i32 cc_op;
145 static TCGv_i64 cc_src;
146 static TCGv_i64 cc_dst;
147 static TCGv_i64 cc_vr;
148
149 static char cpu_reg_names[32][4];
150 static TCGv_i64 regs[16];
151 static TCGv_i64 fregs[16];
152
153 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
154
155 void s390x_translate_init(void)
156 {
157 int i;
158
159 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
160 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
161 offsetof(CPUS390XState, psw.addr),
162 "psw_addr");
163 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
164 offsetof(CPUS390XState, psw.mask),
165 "psw_mask");
166
167 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
168 "cc_op");
169 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
170 "cc_src");
171 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
172 "cc_dst");
173 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
174 "cc_vr");
175
176 for (i = 0; i < 16; i++) {
177 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
178 regs[i] = tcg_global_mem_new(TCG_AREG0,
179 offsetof(CPUS390XState, regs[i]),
180 cpu_reg_names[i]);
181 }
182
183 for (i = 0; i < 16; i++) {
184 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
185 fregs[i] = tcg_global_mem_new(TCG_AREG0,
186 offsetof(CPUS390XState, fregs[i].d),
187 cpu_reg_names[i + 16]);
188 }
189
190 /* register helpers */
191 #define GEN_HELPER 2
192 #include "helper.h"
193 }
194
195 static TCGv_i64 load_reg(int reg)
196 {
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
199 return r;
200 }
201
202 static TCGv_i64 load_freg32_i64(int reg)
203 {
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
206 return r;
207 }
208
209 static void store_reg(int reg, TCGv_i64 v)
210 {
211 tcg_gen_mov_i64(regs[reg], v);
212 }
213
214 static void store_freg(int reg, TCGv_i64 v)
215 {
216 tcg_gen_mov_i64(fregs[reg], v);
217 }
218
219 static void store_reg32_i64(int reg, TCGv_i64 v)
220 {
221 /* 32-bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
223 }
224
225 static void store_reg32h_i64(int reg, TCGv_i64 v)
226 {
227 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
228 }
229
230 static void store_freg32_i64(int reg, TCGv_i64 v)
231 {
232 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
233 }
234
235 static void return_low128(TCGv_i64 dest)
236 {
237 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
238 }
239
240 static void update_psw_addr(DisasContext *s)
241 {
242 /* psw.addr */
243 tcg_gen_movi_i64(psw_addr, s->pc);
244 }
245
246 static void update_cc_op(DisasContext *s)
247 {
248 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
249 tcg_gen_movi_i32(cc_op, s->cc_op);
250 }
251 }
252
253 static void potential_page_fault(DisasContext *s)
254 {
255 update_psw_addr(s);
256 update_cc_op(s);
257 }
258
259 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
260 {
261 return (uint64_t)cpu_lduw_code(env, pc);
262 }
263
264 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
265 {
266 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
267 }
268
269 static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
270 {
271 return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
272 }
273
274 static int get_mem_index(DisasContext *s)
275 {
276 switch (s->tb->flags & FLAG_MASK_ASC) {
277 case PSW_ASC_PRIMARY >> 32:
278 return 0;
279 case PSW_ASC_SECONDARY >> 32:
280 return 1;
281 case PSW_ASC_HOME >> 32:
282 return 2;
283 default:
284 tcg_abort();
285 break;
286 }
287 }
288
289 static void gen_exception(int excp)
290 {
291 TCGv_i32 tmp = tcg_const_i32(excp);
292 gen_helper_exception(cpu_env, tmp);
293 tcg_temp_free_i32(tmp);
294 }
295
296 static void gen_program_exception(DisasContext *s, int code)
297 {
298 TCGv_i32 tmp;
299
300 /* Remember what pgm exception this was. */
301 tmp = tcg_const_i32(code);
302 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
303 tcg_temp_free_i32(tmp);
304
305 tmp = tcg_const_i32(s->next_pc - s->pc);
306 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
307 tcg_temp_free_i32(tmp);
308
309 /* Advance past instruction. */
310 s->pc = s->next_pc;
311 update_psw_addr(s);
312
313 /* Save off cc. */
314 update_cc_op(s);
315
316 /* Trigger exception. */
317 gen_exception(EXCP_PGM);
318 }
319
320 static inline void gen_illegal_opcode(DisasContext *s)
321 {
322 gen_program_exception(s, PGM_SPECIFICATION);
323 }
324
325 static inline void check_privileged(DisasContext *s)
326 {
327 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
328 gen_program_exception(s, PGM_PRIVILEGED);
329 }
330 }
331
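/* Compute the b2 + x2 + d2 effective address into a fresh temporary,
   truncated to 31 bits when not running in 64-bit addressing mode. */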
332 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
333 {
334 TCGv_i64 tmp;
335
336 /* 31-bitify the immediate part; register contents are dealt with below */
337 if (!(s->tb->flags & FLAG_MASK_64)) {
338 d2 &= 0x7fffffffUL;
339 }
340
341 if (x2) {
342 if (d2) {
343 tmp = tcg_const_i64(d2);
344 tcg_gen_add_i64(tmp, tmp, regs[x2]);
345 } else {
346 tmp = load_reg(x2);
347 }
348 if (b2) {
349 tcg_gen_add_i64(tmp, tmp, regs[b2]);
350 }
351 } else if (b2) {
352 if (d2) {
353 tmp = tcg_const_i64(d2);
354 tcg_gen_add_i64(tmp, tmp, regs[b2]);
355 } else {
356 tmp = load_reg(b2);
357 }
358 } else {
359 tmp = tcg_const_i64(d2);
360 }
361
362 /* Apply the 31-bit mask if register contents contributed to the address */
363 if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
364 tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
365 }
366
367 return tmp;
368 }
369
370 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
371 {
372 s->cc_op = CC_OP_CONST0 + val;
373 }
374
375 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
376 {
377 tcg_gen_discard_i64(cc_src);
378 tcg_gen_mov_i64(cc_dst, dst);
379 tcg_gen_discard_i64(cc_vr);
380 s->cc_op = op;
381 }
382
383 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
384 TCGv_i64 dst)
385 {
386 tcg_gen_mov_i64(cc_src, src);
387 tcg_gen_mov_i64(cc_dst, dst);
388 tcg_gen_discard_i64(cc_vr);
389 s->cc_op = op;
390 }
391
392 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
393 TCGv_i64 dst, TCGv_i64 vr)
394 {
395 tcg_gen_mov_i64(cc_src, src);
396 tcg_gen_mov_i64(cc_dst, dst);
397 tcg_gen_mov_i64(cc_vr, vr);
398 s->cc_op = op;
399 }
400
401 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
402 {
403 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
404 }
405
406 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
407 {
408 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
409 }
410
411 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
412 {
413 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
414 }
415
416 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
417 {
418 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
419 }
420
421 /* CC value is in env->cc_op */
422 static void set_cc_static(DisasContext *s)
423 {
424 tcg_gen_discard_i64(cc_src);
425 tcg_gen_discard_i64(cc_dst);
426 tcg_gen_discard_i64(cc_vr);
427 s->cc_op = CC_OP_STATIC;
428 }
429
430 /* calculates cc into cc_op */
431 static void gen_op_calc_cc(DisasContext *s)
432 {
433 TCGv_i32 local_cc_op;
434 TCGv_i64 dummy;
435
436 TCGV_UNUSED_I32(local_cc_op);
437 TCGV_UNUSED_I64(dummy);
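/* The first switch only allocates the temporaries needed by the helper
   calls below: a constant copy of s->cc_op, plus a dummy operand for the
   cases that pass fewer than three arguments. The constant, static and
   dynamic cases need neither. */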
438 switch (s->cc_op) {
439 default:
440 dummy = tcg_const_i64(0);
441 /* FALLTHRU */
442 case CC_OP_ADD_64:
443 case CC_OP_ADDU_64:
444 case CC_OP_ADDC_64:
445 case CC_OP_SUB_64:
446 case CC_OP_SUBU_64:
447 case CC_OP_SUBB_64:
448 case CC_OP_ADD_32:
449 case CC_OP_ADDU_32:
450 case CC_OP_ADDC_32:
451 case CC_OP_SUB_32:
452 case CC_OP_SUBU_32:
453 case CC_OP_SUBB_32:
454 local_cc_op = tcg_const_i32(s->cc_op);
455 break;
456 case CC_OP_CONST0:
457 case CC_OP_CONST1:
458 case CC_OP_CONST2:
459 case CC_OP_CONST3:
460 case CC_OP_STATIC:
461 case CC_OP_DYNAMIC:
462 break;
463 }
464
465 switch (s->cc_op) {
466 case CC_OP_CONST0:
467 case CC_OP_CONST1:
468 case CC_OP_CONST2:
469 case CC_OP_CONST3:
470 /* s->cc_op is the cc value */
471 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
472 break;
473 case CC_OP_STATIC:
474 /* env->cc_op already is the cc value */
475 break;
476 case CC_OP_NZ:
477 case CC_OP_ABS_64:
478 case CC_OP_NABS_64:
479 case CC_OP_ABS_32:
480 case CC_OP_NABS_32:
481 case CC_OP_LTGT0_32:
482 case CC_OP_LTGT0_64:
483 case CC_OP_COMP_32:
484 case CC_OP_COMP_64:
485 case CC_OP_NZ_F32:
486 case CC_OP_NZ_F64:
487 case CC_OP_FLOGR:
488 /* 1 argument */
489 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
490 break;
491 case CC_OP_ICM:
492 case CC_OP_LTGT_32:
493 case CC_OP_LTGT_64:
494 case CC_OP_LTUGTU_32:
495 case CC_OP_LTUGTU_64:
496 case CC_OP_TM_32:
497 case CC_OP_TM_64:
498 case CC_OP_SLA_32:
499 case CC_OP_SLA_64:
500 case CC_OP_NZ_F128:
501 /* 2 arguments */
502 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
503 break;
504 case CC_OP_ADD_64:
505 case CC_OP_ADDU_64:
506 case CC_OP_ADDC_64:
507 case CC_OP_SUB_64:
508 case CC_OP_SUBU_64:
509 case CC_OP_SUBB_64:
510 case CC_OP_ADD_32:
511 case CC_OP_ADDU_32:
512 case CC_OP_ADDC_32:
513 case CC_OP_SUB_32:
514 case CC_OP_SUBU_32:
515 case CC_OP_SUBB_32:
516 /* 3 arguments */
517 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
518 break;
519 case CC_OP_DYNAMIC:
520 /* unknown operation - assume 3 arguments and cc_op in env */
521 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
522 break;
523 default:
524 tcg_abort();
525 }
526
527 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
528 tcg_temp_free_i32(local_cc_op);
529 }
530 if (!TCGV_IS_UNUSED_I64(dummy)) {
531 tcg_temp_free_i64(dummy);
532 }
533
534 /* We now have cc in cc_op as constant */
535 set_cc_static(s);
536 }
537
538 static int use_goto_tb(DisasContext *s, uint64_t dest)
539 {
540 /* NOTE: we handle the case where the TB spans two pages here */
541 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
542 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
543 && !s->singlestep_enabled
544 && !(s->tb->cflags & CF_LAST_IO));
545 }
546
547 static void account_noninline_branch(DisasContext *s, int cc_op)
548 {
549 #ifdef DEBUG_INLINE_BRANCHES
550 inline_branch_miss[cc_op]++;
551 #endif
552 }
553
554 static void account_inline_branch(DisasContext *s, int cc_op)
555 {
556 #ifdef DEBUG_INLINE_BRANCHES
557 inline_branch_hit[cc_op]++;
558 #endif
559 }
560
561 /* Table of mask values to comparison codes, given a comparison as input.
562 For a true comparison CC=3 will never be set, but we treat this
563 conservatively for possible use when CC=3 indicates overflow. */
564 static const TCGCond ltgt_cond[16] = {
565 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
566 TCG_COND_GT, TCG_COND_NEVER, /* | | GT | x */
567 TCG_COND_LT, TCG_COND_NEVER, /* | LT | | x */
568 TCG_COND_NE, TCG_COND_NEVER, /* | LT | GT | x */
569 TCG_COND_EQ, TCG_COND_NEVER, /* EQ | | | x */
570 TCG_COND_GE, TCG_COND_NEVER, /* EQ | | GT | x */
571 TCG_COND_LE, TCG_COND_NEVER, /* EQ | LT | | x */
572 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
573 };
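/* The mask bits select which CC values satisfy the branch: 8 -> CC0 (equal),
   4 -> CC1 (low), 2 -> CC2 (high), 1 -> CC3. For example, mask 12 (EQ | LT)
   maps to TCG_COND_LE above, and mask 6 (LT | GT) maps to TCG_COND_NE. */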
574
575 /* Table of mask values to comparison codes, given a logic op as input.
576 For such, only CC=0 and CC=1 should be possible. */
577 static const TCGCond nz_cond[16] = {
578 /* | | x | x */
579 TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
580 /* | NE | x | x */
581 TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
582 /* EQ | | x | x */
583 TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
584 /* EQ | NE | x | x */
585 TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
586 };
587
588 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
589 details required to generate a TCG comparison. */
590 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
591 {
592 TCGCond cond;
593 enum cc_op old_cc_op = s->cc_op;
594
595 if (mask == 15 || mask == 0) {
596 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
597 c->u.s32.a = cc_op;
598 c->u.s32.b = cc_op;
599 c->g1 = c->g2 = true;
600 c->is_64 = false;
601 return;
602 }
603
604 /* Find the TCG condition for the mask + cc op. */
605 switch (old_cc_op) {
606 case CC_OP_LTGT0_32:
607 case CC_OP_LTGT0_64:
608 case CC_OP_LTGT_32:
609 case CC_OP_LTGT_64:
610 cond = ltgt_cond[mask];
611 if (cond == TCG_COND_NEVER) {
612 goto do_dynamic;
613 }
614 account_inline_branch(s, old_cc_op);
615 break;
616
617 case CC_OP_LTUGTU_32:
618 case CC_OP_LTUGTU_64:
619 cond = tcg_unsigned_cond(ltgt_cond[mask]);
620 if (cond == TCG_COND_NEVER) {
621 goto do_dynamic;
622 }
623 account_inline_branch(s, old_cc_op);
624 break;
625
626 case CC_OP_NZ:
627 cond = nz_cond[mask];
628 if (cond == TCG_COND_NEVER) {
629 goto do_dynamic;
630 }
631 account_inline_branch(s, old_cc_op);
632 break;
633
634 case CC_OP_TM_32:
635 case CC_OP_TM_64:
636 switch (mask) {
637 case 8:
638 cond = TCG_COND_EQ;
639 break;
640 case 4 | 2 | 1:
641 cond = TCG_COND_NE;
642 break;
643 default:
644 goto do_dynamic;
645 }
646 account_inline_branch(s, old_cc_op);
647 break;
648
649 case CC_OP_ICM:
650 switch (mask) {
651 case 8:
652 cond = TCG_COND_EQ;
653 break;
654 case 4 | 2 | 1:
655 case 4 | 2:
656 cond = TCG_COND_NE;
657 break;
658 default:
659 goto do_dynamic;
660 }
661 account_inline_branch(s, old_cc_op);
662 break;
663
664 case CC_OP_FLOGR:
665 switch (mask & 0xa) {
666 case 8: /* src == 0 -> no '1' bit found */
667 cond = TCG_COND_EQ;
668 break;
669 case 2: /* src != 0 -> a '1' bit was found */
670 cond = TCG_COND_NE;
671 break;
672 default:
673 goto do_dynamic;
674 }
675 account_inline_branch(s, old_cc_op);
676 break;
677
678 default:
679 do_dynamic:
680 /* Calculate cc value. */
681 gen_op_calc_cc(s);
682 /* FALLTHRU */
683
684 case CC_OP_STATIC:
685 /* Jump based on CC. We'll load up the real cond below;
686 the assignment here merely avoids a compiler warning. */
687 account_noninline_branch(s, old_cc_op);
688 old_cc_op = CC_OP_STATIC;
689 cond = TCG_COND_NEVER;
690 break;
691 }
692
693 /* Load up the arguments of the comparison. */
694 c->is_64 = true;
695 c->g1 = c->g2 = false;
696 switch (old_cc_op) {
697 case CC_OP_LTGT0_32:
698 c->is_64 = false;
699 c->u.s32.a = tcg_temp_new_i32();
700 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
701 c->u.s32.b = tcg_const_i32(0);
702 break;
703 case CC_OP_LTGT_32:
704 case CC_OP_LTUGTU_32:
705 c->is_64 = false;
706 c->u.s32.a = tcg_temp_new_i32();
707 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
708 c->u.s32.b = tcg_temp_new_i32();
709 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
710 break;
711
712 case CC_OP_LTGT0_64:
713 case CC_OP_NZ:
714 case CC_OP_FLOGR:
715 c->u.s64.a = cc_dst;
716 c->u.s64.b = tcg_const_i64(0);
717 c->g1 = true;
718 break;
719 case CC_OP_LTGT_64:
720 case CC_OP_LTUGTU_64:
721 c->u.s64.a = cc_src;
722 c->u.s64.b = cc_dst;
723 c->g1 = c->g2 = true;
724 break;
725
726 case CC_OP_TM_32:
727 case CC_OP_TM_64:
728 case CC_OP_ICM:
729 c->u.s64.a = tcg_temp_new_i64();
730 c->u.s64.b = tcg_const_i64(0);
731 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
732 break;
733
734 case CC_OP_STATIC:
735 c->is_64 = false;
736 c->u.s32.a = cc_op;
737 c->g1 = true;
738 switch (mask) {
739 case 0x8 | 0x4 | 0x2: /* cc != 3 */
740 cond = TCG_COND_NE;
741 c->u.s32.b = tcg_const_i32(3);
742 break;
743 case 0x8 | 0x4 | 0x1: /* cc != 2 */
744 cond = TCG_COND_NE;
745 c->u.s32.b = tcg_const_i32(2);
746 break;
747 case 0x8 | 0x2 | 0x1: /* cc != 1 */
748 cond = TCG_COND_NE;
749 c->u.s32.b = tcg_const_i32(1);
750 break;
751 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
752 cond = TCG_COND_EQ;
753 c->g1 = false;
754 c->u.s32.a = tcg_temp_new_i32();
755 c->u.s32.b = tcg_const_i32(0);
756 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
757 break;
758 case 0x8 | 0x4: /* cc < 2 */
759 cond = TCG_COND_LTU;
760 c->u.s32.b = tcg_const_i32(2);
761 break;
762 case 0x8: /* cc == 0 */
763 cond = TCG_COND_EQ;
764 c->u.s32.b = tcg_const_i32(0);
765 break;
766 case 0x4 | 0x2 | 0x1: /* cc != 0 */
767 cond = TCG_COND_NE;
768 c->u.s32.b = tcg_const_i32(0);
769 break;
770 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
771 cond = TCG_COND_NE;
772 c->g1 = false;
773 c->u.s32.a = tcg_temp_new_i32();
774 c->u.s32.b = tcg_const_i32(0);
775 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
776 break;
777 case 0x4: /* cc == 1 */
778 cond = TCG_COND_EQ;
779 c->u.s32.b = tcg_const_i32(1);
780 break;
781 case 0x2 | 0x1: /* cc > 1 */
782 cond = TCG_COND_GTU;
783 c->u.s32.b = tcg_const_i32(1);
784 break;
785 case 0x2: /* cc == 2 */
786 cond = TCG_COND_EQ;
787 c->u.s32.b = tcg_const_i32(2);
788 break;
789 case 0x1: /* cc == 3 */
790 cond = TCG_COND_EQ;
791 c->u.s32.b = tcg_const_i32(3);
792 break;
793 default:
794 /* CC is masked by something else: (8 >> cc) & mask. */
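/* For example, with mask 9 (CC0 or CC3): if cc == 3 at run time,
   (8 >> 3) & 9 == 1, so the NE test against zero below takes the branch;
   if cc == 1, (8 >> 1) & 9 == 0 and it does not. */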
795 cond = TCG_COND_NE;
796 c->g1 = false;
797 c->u.s32.a = tcg_const_i32(8);
798 c->u.s32.b = tcg_const_i32(0);
799 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
800 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
801 break;
802 }
803 break;
804
805 default:
806 abort();
807 }
808 c->cond = cond;
809 }
810
811 static void free_compare(DisasCompare *c)
812 {
813 if (!c->g1) {
814 if (c->is_64) {
815 tcg_temp_free_i64(c->u.s64.a);
816 } else {
817 tcg_temp_free_i32(c->u.s32.a);
818 }
819 }
820 if (!c->g2) {
821 if (c->is_64) {
822 tcg_temp_free_i64(c->u.s64.b);
823 } else {
824 tcg_temp_free_i32(c->u.s32.b);
825 }
826 }
827 }
828
829 /* ====================================================================== */
830 /* Define the insn format enumeration. */
831 #define F0(N) FMT_##N,
832 #define F1(N, X1) F0(N)
833 #define F2(N, X1, X2) F0(N)
834 #define F3(N, X1, X2, X3) F0(N)
835 #define F4(N, X1, X2, X3, X4) F0(N)
836 #define F5(N, X1, X2, X3, X4, X5) F0(N)
837
838 typedef enum {
839 #include "insn-format.def"
840 } DisasFormat;
841
842 #undef F0
843 #undef F1
844 #undef F2
845 #undef F3
846 #undef F4
847 #undef F5
848
849 /* Define a structure to hold the decoded fields. We'll store each inside
850 an array indexed by an enum. In order to conserve memory, we'll arrange
851 for fields that do not exist at the same time to overlap, thus the "C"
852 for compact. For checking purposes there is an "O" for original index
853 as well that will be applied to availability bitmaps. */
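/* For example, r1 and m1 never occur in the same format, so FLD_O_r1 and
   FLD_O_m1 share compact slot 0 below; presentO remembers which original
   field was actually decoded, letting get_field assert on misuse. */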
854
855 enum DisasFieldIndexO {
856 FLD_O_r1,
857 FLD_O_r2,
858 FLD_O_r3,
859 FLD_O_m1,
860 FLD_O_m3,
861 FLD_O_m4,
862 FLD_O_b1,
863 FLD_O_b2,
864 FLD_O_b4,
865 FLD_O_d1,
866 FLD_O_d2,
867 FLD_O_d4,
868 FLD_O_x2,
869 FLD_O_l1,
870 FLD_O_l2,
871 FLD_O_i1,
872 FLD_O_i2,
873 FLD_O_i3,
874 FLD_O_i4,
875 FLD_O_i5
876 };
877
878 enum DisasFieldIndexC {
879 FLD_C_r1 = 0,
880 FLD_C_m1 = 0,
881 FLD_C_b1 = 0,
882 FLD_C_i1 = 0,
883
884 FLD_C_r2 = 1,
885 FLD_C_b2 = 1,
886 FLD_C_i2 = 1,
887
888 FLD_C_r3 = 2,
889 FLD_C_m3 = 2,
890 FLD_C_i3 = 2,
891
892 FLD_C_m4 = 3,
893 FLD_C_b4 = 3,
894 FLD_C_i4 = 3,
895 FLD_C_l1 = 3,
896
897 FLD_C_i5 = 4,
898 FLD_C_d1 = 4,
899
900 FLD_C_d2 = 5,
901
902 FLD_C_d4 = 6,
903 FLD_C_x2 = 6,
904 FLD_C_l2 = 6,
905
906 NUM_C_FIELD = 7
907 };
908
909 struct DisasFields {
910 unsigned op:8;
911 unsigned op2:8;
912 unsigned presentC:16;
913 unsigned int presentO;
914 int c[NUM_C_FIELD];
915 };
916
917 /* This is the way fields are to be accessed out of DisasFields. */
918 #define have_field(S, F) have_field1((S), FLD_O_##F)
919 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
920
921 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
922 {
923 return (f->presentO >> c) & 1;
924 }
925
926 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
927 enum DisasFieldIndexC c)
928 {
929 assert(have_field1(f, o));
930 return f->c[c];
931 }
932
933 /* Describe the layout of each field in each format. */
934 typedef struct DisasField {
935 unsigned int beg:8;
936 unsigned int size:8;
937 unsigned int type:2;
938 unsigned int indexC:6;
939 enum DisasFieldIndexO indexO:8;
940 } DisasField;
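/* The type field tells the field-extraction code how to interpret the raw
   bits: 0 for plain fields, 1 for immediates (the I() macro below), and 2
   for the split 20-bit long displacements used by the BDL/BXDL formats. */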
941
942 typedef struct DisasFormatInfo {
943 DisasField op[NUM_C_FIELD];
944 } DisasFormatInfo;
945
946 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
947 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
948 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
949 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
950 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
951 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
952 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
953 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
954 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
955 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
956 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
957 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
958 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
959 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
960
961 #define F0(N) { { } },
962 #define F1(N, X1) { { X1 } },
963 #define F2(N, X1, X2) { { X1, X2 } },
964 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
965 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
966 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
967
968 static const DisasFormatInfo format_info[] = {
969 #include "insn-format.def"
970 };
971
972 #undef F0
973 #undef F1
974 #undef F2
975 #undef F3
976 #undef F4
977 #undef F5
978 #undef R
979 #undef M
980 #undef BD
981 #undef BXD
982 #undef BDL
983 #undef BXDL
984 #undef I
985 #undef L
986
987 /* Generally, we'll extract operands into these structures, operate upon
988 them, and store them back. See the "in1", "in2", "prep", "wout" sets
989 of routines below for more details. */
990 typedef struct {
991 bool g_out, g_out2, g_in1, g_in2;
992 TCGv_i64 out, out2, in1, in2;
993 TCGv_i64 addr1;
994 } DisasOps;
995
996 /* Return values from translate_one, indicating the state of the TB. */
997 typedef enum {
998 /* Continue the TB. */
999 NO_EXIT,
1000 /* We have emitted one or more goto_tb. No fixup required. */
1001 EXIT_GOTO_TB,
1002 /* We are not using a goto_tb (for whatever reason), but have
1003 nevertheless updated the PC, so there's no need to do it again on
1004 exiting the TB. */
1005 EXIT_PC_UPDATED,
1006 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1007 updated the PC for the next instruction to be executed. */
1008 EXIT_PC_STALE,
1009 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1010 No following code will be executed. */
1011 EXIT_NORETURN,
1012 } ExitStatus;
1013
1014 typedef enum DisasFacility {
1015 FAC_Z, /* zarch (default) */
1016 FAC_CASS, /* compare and swap and store */
1017 FAC_CASS2, /* compare and swap and store 2 */
1018 FAC_DFP, /* decimal floating point */
1019 FAC_DFPR, /* decimal floating point rounding */
1020 FAC_DO, /* distinct operands */
1021 FAC_EE, /* execute extensions */
1022 FAC_EI, /* extended immediate */
1023 FAC_FPE, /* floating point extension */
1024 FAC_FPSSH, /* floating point support sign handling */
1025 FAC_FPRGR, /* FPR-GR transfer */
1026 FAC_GIE, /* general instructions extension */
1027 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1028 FAC_HW, /* high-word */
1029 FAC_IEEEE_SIM, /* IEEE exception simulation */
1030 FAC_LOC, /* load/store on condition */
1031 FAC_LD, /* long displacement */
1032 FAC_PC, /* population count */
1033 FAC_SCF, /* store clock fast */
1034 FAC_SFLE, /* store facility list extended */
1035 } DisasFacility;
1036
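/* Decoded instructions run their hooks in a fixed order: help_in1 and
   help_in2 load the inputs, help_prep sets up the outputs, help_op does
   the real work, help_wout writes results back, and help_cout computes
   the condition code. Any hook may be NULL. */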
1037 struct DisasInsn {
1038 unsigned opc:16;
1039 DisasFormat fmt:6;
1040 DisasFacility fac:6;
1041
1042 const char *name;
1043
1044 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1045 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1046 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1047 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1048 void (*help_cout)(DisasContext *, DisasOps *);
1049 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1050
1051 uint64_t data;
1052 };
1053
1054 /* ====================================================================== */
1055 /* Miscellaneous helpers, used by several operations. */
1056
1057 static void help_l2_shift(DisasContext *s, DisasFields *f,
1058 DisasOps *o, int mask)
1059 {
1060 int b2 = get_field(f, b2);
1061 int d2 = get_field(f, d2);
1062
1063 if (b2 == 0) {
1064 o->in2 = tcg_const_i64(d2 & mask);
1065 } else {
1066 o->in2 = get_address(s, 0, b2, d2);
1067 tcg_gen_andi_i64(o->in2, o->in2, mask);
1068 }
1069 }
1070
1071 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1072 {
1073 if (dest == s->next_pc) {
1074 return NO_EXIT;
1075 }
1076 if (use_goto_tb(s, dest)) {
1077 update_cc_op(s);
1078 tcg_gen_goto_tb(0);
1079 tcg_gen_movi_i64(psw_addr, dest);
1080 tcg_gen_exit_tb((tcg_target_long)s->tb);
1081 return EXIT_GOTO_TB;
1082 } else {
1083 tcg_gen_movi_i64(psw_addr, dest);
1084 return EXIT_PC_UPDATED;
1085 }
1086 }
1087
1088 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1089 bool is_imm, int imm, TCGv_i64 cdest)
1090 {
1091 ExitStatus ret;
1092 uint64_t dest = s->pc + 2 * imm;
1093 int lab;
1094
1095 /* Take care of the special cases first. */
1096 if (c->cond == TCG_COND_NEVER) {
1097 ret = NO_EXIT;
1098 goto egress;
1099 }
1100 if (is_imm) {
1101 if (dest == s->next_pc) {
1102 /* Branch to next. */
1103 ret = NO_EXIT;
1104 goto egress;
1105 }
1106 if (c->cond == TCG_COND_ALWAYS) {
1107 ret = help_goto_direct(s, dest);
1108 goto egress;
1109 }
1110 } else {
1111 if (TCGV_IS_UNUSED_I64(cdest)) {
1112 /* E.g. bcr %r0 -> no branch. */
1113 ret = NO_EXIT;
1114 goto egress;
1115 }
1116 if (c->cond == TCG_COND_ALWAYS) {
1117 tcg_gen_mov_i64(psw_addr, cdest);
1118 ret = EXIT_PC_UPDATED;
1119 goto egress;
1120 }
1121 }
1122
1123 if (use_goto_tb(s, s->next_pc)) {
1124 if (is_imm && use_goto_tb(s, dest)) {
1125 /* Both exits can use goto_tb. */
1126 update_cc_op(s);
1127
1128 lab = gen_new_label();
1129 if (c->is_64) {
1130 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1131 } else {
1132 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1133 }
1134
1135 /* Branch not taken. */
1136 tcg_gen_goto_tb(0);
1137 tcg_gen_movi_i64(psw_addr, s->next_pc);
1138 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1139
1140 /* Branch taken. */
1141 gen_set_label(lab);
1142 tcg_gen_goto_tb(1);
1143 tcg_gen_movi_i64(psw_addr, dest);
1144 tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
1145
1146 ret = EXIT_GOTO_TB;
1147 } else {
1148 /* Fallthru can use goto_tb, but taken branch cannot. */
1149 /* Store taken branch destination before the brcond. This
1150 avoids having to allocate a new local temp to hold it.
1151 We'll overwrite this in the not taken case anyway. */
1152 if (!is_imm) {
1153 tcg_gen_mov_i64(psw_addr, cdest);
1154 }
1155
1156 lab = gen_new_label();
1157 if (c->is_64) {
1158 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1159 } else {
1160 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1161 }
1162
1163 /* Branch not taken. */
1164 update_cc_op(s);
1165 tcg_gen_goto_tb(0);
1166 tcg_gen_movi_i64(psw_addr, s->next_pc);
1167 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1168
1169 gen_set_label(lab);
1170 if (is_imm) {
1171 tcg_gen_movi_i64(psw_addr, dest);
1172 }
1173 ret = EXIT_PC_UPDATED;
1174 }
1175 } else {
1176 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1177 Most commonly we're single-stepping or some other condition that
1178 disables all use of goto_tb. Just update the PC and exit. */
1179
1180 TCGv_i64 next = tcg_const_i64(s->next_pc);
1181 if (is_imm) {
1182 cdest = tcg_const_i64(dest);
1183 }
1184
1185 if (c->is_64) {
1186 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1187 cdest, next);
1188 } else {
1189 TCGv_i32 t0 = tcg_temp_new_i32();
1190 TCGv_i64 t1 = tcg_temp_new_i64();
1191 TCGv_i64 z = tcg_const_i64(0);
1192 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1193 tcg_gen_extu_i32_i64(t1, t0);
1194 tcg_temp_free_i32(t0);
1195 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1196 tcg_temp_free_i64(t1);
1197 tcg_temp_free_i64(z);
1198 }
1199
1200 if (is_imm) {
1201 tcg_temp_free_i64(cdest);
1202 }
1203 tcg_temp_free_i64(next);
1204
1205 ret = EXIT_PC_UPDATED;
1206 }
1207
1208 egress:
1209 free_compare(c);
1210 return ret;
1211 }
1212
1213 /* ====================================================================== */
1214 /* The operations. These perform the bulk of the work for any insn,
1215 usually after the operands have been loaded and output initialized. */
1216
1217 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1218 {
1219 gen_helper_abs_i64(o->out, o->in2);
1220 return NO_EXIT;
1221 }
1222
1223 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1224 {
1225 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1226 return NO_EXIT;
1227 }
1228
1229 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1230 {
1231 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1232 return NO_EXIT;
1233 }
1234
1235 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1236 {
1237 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1238 tcg_gen_mov_i64(o->out2, o->in2);
1239 return NO_EXIT;
1240 }
1241
1242 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1243 {
1244 tcg_gen_add_i64(o->out, o->in1, o->in2);
1245 return NO_EXIT;
1246 }
1247
1248 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1249 {
1250 TCGv_i64 cc;
1251
1252 tcg_gen_add_i64(o->out, o->in1, o->in2);
1253
1254 /* XXX possible optimization point */
1255 gen_op_calc_cc(s);
1256 cc = tcg_temp_new_i64();
1257 tcg_gen_extu_i32_i64(cc, cc_op);
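/* The logical-with-carry CC encodes the carry in bit 1 (CC is 2 or 3
   exactly when a carry occurred), so shifting right by one yields the
   carry-in to add. */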
1258 tcg_gen_shri_i64(cc, cc, 1);
1259
1260 tcg_gen_add_i64(o->out, o->out, cc);
1261 tcg_temp_free_i64(cc);
1262 return NO_EXIT;
1263 }
1264
1265 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1266 {
1267 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1268 return NO_EXIT;
1269 }
1270
1271 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1272 {
1273 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1274 return NO_EXIT;
1275 }
1276
1277 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1278 {
1279 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1280 return_low128(o->out2);
1281 return NO_EXIT;
1282 }
1283
1284 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1285 {
1286 tcg_gen_and_i64(o->out, o->in1, o->in2);
1287 return NO_EXIT;
1288 }
1289
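/* AND a 16- or 32-bit slice of a register with an immediate. insn->data
   packs the slice size and its shift; the immediate is moved into place
   and every bit outside the slice is forced to 1, so the AND leaves the
   rest of the register untouched (e.g. shift 48, size 16 affects only
   bits 63..48). */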
1290 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1291 {
1292 int shift = s->insn->data & 0xff;
1293 int size = s->insn->data >> 8;
1294 uint64_t mask = ((1ull << size) - 1) << shift;
1295
1296 assert(!o->g_in2);
1297 tcg_gen_shli_i64(o->in2, o->in2, shift);
1298 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1299 tcg_gen_and_i64(o->out, o->in1, o->in2);
1300
1301 /* Produce the CC from only the bits manipulated. */
1302 tcg_gen_andi_i64(cc_dst, o->out, mask);
1303 set_cc_nz_u64(s, cc_dst);
1304 return NO_EXIT;
1305 }
1306
1307 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1308 {
1309 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1310 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1311 tcg_gen_mov_i64(psw_addr, o->in2);
1312 return EXIT_PC_UPDATED;
1313 } else {
1314 return NO_EXIT;
1315 }
1316 }
1317
1318 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1319 {
1320 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1321 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1322 }
1323
1324 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1325 {
1326 int m1 = get_field(s->fields, m1);
1327 bool is_imm = have_field(s->fields, i2);
1328 int imm = is_imm ? get_field(s->fields, i2) : 0;
1329 DisasCompare c;
1330
1331 disas_jcc(s, &c, m1);
1332 return help_branch(s, &c, is_imm, imm, o->in2);
1333 }
1334
1335 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1336 {
1337 int r1 = get_field(s->fields, r1);
1338 bool is_imm = have_field(s->fields, i2);
1339 int imm = is_imm ? get_field(s->fields, i2) : 0;
1340 DisasCompare c;
1341 TCGv_i64 t;
1342
1343 c.cond = TCG_COND_NE;
1344 c.is_64 = false;
1345 c.g1 = false;
1346 c.g2 = false;
1347
1348 t = tcg_temp_new_i64();
1349 tcg_gen_subi_i64(t, regs[r1], 1);
1350 store_reg32_i64(r1, t);
1351 c.u.s32.a = tcg_temp_new_i32();
1352 c.u.s32.b = tcg_const_i32(0);
1353 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1354 tcg_temp_free_i64(t);
1355
1356 return help_branch(s, &c, is_imm, imm, o->in2);
1357 }
1358
1359 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1360 {
1361 int r1 = get_field(s->fields, r1);
1362 bool is_imm = have_field(s->fields, i2);
1363 int imm = is_imm ? get_field(s->fields, i2) : 0;
1364 DisasCompare c;
1365
1366 c.cond = TCG_COND_NE;
1367 c.is_64 = true;
1368 c.g1 = true;
1369 c.g2 = false;
1370
1371 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1372 c.u.s64.a = regs[r1];
1373 c.u.s64.b = tcg_const_i64(0);
1374
1375 return help_branch(s, &c, is_imm, imm, o->in2);
1376 }
1377
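/* BRANCH ON INDEX, 32-bit flavour: add r3 to r1, then compare the new
   index against the limit held in the odd register of the r3 pair (r3 | 1).
   insn->data distinguishes the <= ("low or equal") test from the > ("high")
   test. */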
1378 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1379 {
1380 int r1 = get_field(s->fields, r1);
1381 int r3 = get_field(s->fields, r3);
1382 bool is_imm = have_field(s->fields, i2);
1383 int imm = is_imm ? get_field(s->fields, i2) : 0;
1384 DisasCompare c;
1385 TCGv_i64 t;
1386
1387 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1388 c.is_64 = false;
1389 c.g1 = false;
1390 c.g2 = false;
1391
1392 t = tcg_temp_new_i64();
1393 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1394 c.u.s32.a = tcg_temp_new_i32();
1395 c.u.s32.b = tcg_temp_new_i32();
1396 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1397 tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
1398 store_reg32_i64(r1, t);
1399 tcg_temp_free_i64(t);
1400
1401 return help_branch(s, &c, is_imm, imm, o->in2);
1402 }
1403
1404 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1405 {
1406 int r1 = get_field(s->fields, r1);
1407 int r3 = get_field(s->fields, r3);
1408 bool is_imm = have_field(s->fields, i2);
1409 int imm = is_imm ? get_field(s->fields, i2) : 0;
1410 DisasCompare c;
1411
1412 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1413 c.is_64 = true;
1414
1415 if (r1 == (r3 | 1)) {
1416 c.u.s64.b = load_reg(r3 | 1);
1417 c.g2 = false;
1418 } else {
1419 c.u.s64.b = regs[r3 | 1];
1420 c.g2 = true;
1421 }
1422
1423 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1424 c.u.s64.a = regs[r1];
1425 c.g1 = true;
1426
1427 return help_branch(s, &c, is_imm, imm, o->in2);
1428 }
1429
1430 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1431 {
1432 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1433 set_cc_static(s);
1434 return NO_EXIT;
1435 }
1436
1437 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1438 {
1439 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1440 set_cc_static(s);
1441 return NO_EXIT;
1442 }
1443
1444 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1445 {
1446 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1447 set_cc_static(s);
1448 return NO_EXIT;
1449 }
1450
1451 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1452 {
1453 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1454 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1455 tcg_temp_free_i32(m3);
1456 gen_set_cc_nz_f32(s, o->in2);
1457 return NO_EXIT;
1458 }
1459
1460 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1461 {
1462 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1463 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1464 tcg_temp_free_i32(m3);
1465 gen_set_cc_nz_f64(s, o->in2);
1466 return NO_EXIT;
1467 }
1468
1469 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1470 {
1471 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1472 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1473 tcg_temp_free_i32(m3);
1474 gen_set_cc_nz_f128(s, o->in1, o->in2);
1475 return NO_EXIT;
1476 }
1477
1478 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1479 {
1480 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1481 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1482 tcg_temp_free_i32(m3);
1483 gen_set_cc_nz_f32(s, o->in2);
1484 return NO_EXIT;
1485 }
1486
1487 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1488 {
1489 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1490 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1491 tcg_temp_free_i32(m3);
1492 gen_set_cc_nz_f64(s, o->in2);
1493 return NO_EXIT;
1494 }
1495
1496 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1497 {
1498 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1499 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1500 tcg_temp_free_i32(m3);
1501 gen_set_cc_nz_f128(s, o->in1, o->in2);
1502 return NO_EXIT;
1503 }
1504
1505 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1506 {
1507 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1508 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1509 tcg_temp_free_i32(m3);
1510 return NO_EXIT;
1511 }
1512
1513 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1514 {
1515 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1516 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1517 tcg_temp_free_i32(m3);
1518 return NO_EXIT;
1519 }
1520
1521 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1522 {
1523 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1524 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1525 tcg_temp_free_i32(m3);
1526 return_low128(o->out2);
1527 return NO_EXIT;
1528 }
1529
1530 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1531 {
1532 int r2 = get_field(s->fields, r2);
1533 TCGv_i64 len = tcg_temp_new_i64();
1534
1535 potential_page_fault(s);
1536 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1537 set_cc_static(s);
1538 return_low128(o->out);
1539
1540 tcg_gen_add_i64(regs[r2], regs[r2], len);
1541 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1542 tcg_temp_free_i64(len);
1543
1544 return NO_EXIT;
1545 }
1546
1547 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1548 {
1549 int l = get_field(s->fields, l1);
1550 TCGv_i32 vl;
1551
1552 switch (l + 1) {
1553 case 1:
1554 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1555 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1556 break;
1557 case 2:
1558 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1559 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1560 break;
1561 case 4:
1562 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1563 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1564 break;
1565 case 8:
1566 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1567 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1568 break;
1569 default:
1570 potential_page_fault(s);
1571 vl = tcg_const_i32(l);
1572 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1573 tcg_temp_free_i32(vl);
1574 set_cc_static(s);
1575 return NO_EXIT;
1576 }
1577 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1578 return NO_EXIT;
1579 }
1580
1581 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1582 {
1583 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1584 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1585 potential_page_fault(s);
1586 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1587 tcg_temp_free_i32(r1);
1588 tcg_temp_free_i32(r3);
1589 set_cc_static(s);
1590 return NO_EXIT;
1591 }
1592
1593 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1594 {
1595 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1596 TCGv_i32 t1 = tcg_temp_new_i32();
1597 tcg_gen_trunc_i64_i32(t1, o->in1);
1598 potential_page_fault(s);
1599 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1600 set_cc_static(s);
1601 tcg_temp_free_i32(t1);
1602 tcg_temp_free_i32(m3);
1603 return NO_EXIT;
1604 }
1605
1606 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1607 {
1608 potential_page_fault(s);
1609 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1610 set_cc_static(s);
1611 return_low128(o->in2);
1612 return NO_EXIT;
1613 }
1614
1615 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1616 {
1617 int r3 = get_field(s->fields, r3);
1618 potential_page_fault(s);
1619 gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
1620 set_cc_static(s);
1621 return NO_EXIT;
1622 }
1623
1624 static ExitStatus op_csg(DisasContext *s, DisasOps *o)
1625 {
1626 int r3 = get_field(s->fields, r3);
1627 potential_page_fault(s);
1628 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
1629 set_cc_static(s);
1630 return NO_EXIT;
1631 }
1632
1633 #ifndef CONFIG_USER_ONLY
1634 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1635 {
1636 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1637 check_privileged(s);
1638 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1639 tcg_temp_free_i32(r1);
1640 set_cc_static(s);
1641 return NO_EXIT;
1642 }
1643 #endif
1644
1645 static ExitStatus op_cds(DisasContext *s, DisasOps *o)
1646 {
1647 int r3 = get_field(s->fields, r3);
1648 TCGv_i64 in3 = tcg_temp_new_i64();
1649 tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
1650 potential_page_fault(s);
1651 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
1652 tcg_temp_free_i64(in3);
1653 set_cc_static(s);
1654 return NO_EXIT;
1655 }
1656
1657 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1658 {
1659 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1660 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1661 potential_page_fault(s);
1662 /* XXX rewrite in tcg */
1663 gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
1664 set_cc_static(s);
1665 return NO_EXIT;
1666 }
1667
1668 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1669 {
1670 TCGv_i64 t1 = tcg_temp_new_i64();
1671 TCGv_i32 t2 = tcg_temp_new_i32();
1672 tcg_gen_trunc_i64_i32(t2, o->in1);
1673 gen_helper_cvd(t1, t2);
1674 tcg_temp_free_i32(t2);
1675 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1676 tcg_temp_free_i64(t1);
1677 return NO_EXIT;
1678 }
1679
1680 #ifndef CONFIG_USER_ONLY
1681 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1682 {
1683 TCGv_i32 tmp;
1684
1685 check_privileged(s);
1686 potential_page_fault(s);
1687
1688 /* We pretend the format is RX_a so that D2 is the field we want. */
1689 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1690 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1691 tcg_temp_free_i32(tmp);
1692 return NO_EXIT;
1693 }
1694 #endif
1695
1696 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
1697 {
1698 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
1699 return_low128(o->out);
1700 return NO_EXIT;
1701 }
1702
1703 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
1704 {
1705 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
1706 return_low128(o->out);
1707 return NO_EXIT;
1708 }
1709
1710 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
1711 {
1712 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
1713 return_low128(o->out);
1714 return NO_EXIT;
1715 }
1716
1717 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
1718 {
1719 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
1720 return_low128(o->out);
1721 return NO_EXIT;
1722 }
1723
1724 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
1725 {
1726 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
1727 return NO_EXIT;
1728 }
1729
1730 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
1731 {
1732 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
1733 return NO_EXIT;
1734 }
1735
1736 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
1737 {
1738 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1739 return_low128(o->out2);
1740 return NO_EXIT;
1741 }
1742
1743 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
1744 {
1745 int r2 = get_field(s->fields, r2);
1746 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
1747 return NO_EXIT;
1748 }
1749
1750 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
1751 {
1752 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
1753 return NO_EXIT;
1754 }
1755
1756 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
1757 {
1758 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
1759 tb->flags, (ab)use the tb->cs_base field as the address of
1760 the template in memory, and grab 8 bits of tb->flags/cflags for
1761 the contents of the register. We would then recognize all this
1762 in gen_intermediate_code_internal, generating code for exactly
1763 one instruction. This new TB then gets executed normally.
1764
1765 On the other hand, this seems to be mostly used for modifying
1766 MVC inside of memcpy, which needs a helper call anyway. So
1767 perhaps this doesn't bear thinking about any further. */
1768
1769 TCGv_i64 tmp;
1770
1771 update_psw_addr(s);
1772 update_cc_op(s);
1773
1774 tmp = tcg_const_i64(s->next_pc);
1775 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
1776 tcg_temp_free_i64(tmp);
1777
1778 set_cc_static(s);
1779 return NO_EXIT;
1780 }
1781
1782 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
1783 {
1784 /* We'll use the original input for cc computation, since we get to
1785 compare that against 0, which ought to be better than comparing
1786 the real output against 64. It also lets cc_dst be a convenient
1787 temporary during our computation. */
1788 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
1789
1790 /* R1 = IN ? CLZ(IN) : 64. */
1791 gen_helper_clz(o->out, o->in2);
1792
1793 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
1794 value by 64, which is undefined. But since the shift is 64 iff the
1795 input is zero, we still get the correct result after and'ing. */
1796 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
1797 tcg_gen_shr_i64(o->out2, o->out2, o->out);
1798 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
1799 return NO_EXIT;
1800 }
1801
1802 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
1803 {
1804 int m3 = get_field(s->fields, m3);
1805 int pos, len, base = s->insn->data;
1806 TCGv_i64 tmp = tcg_temp_new_i64();
1807 uint64_t ccm;
1808
1809 switch (m3) {
1810 case 0xf:
1811 /* Effectively a 32-bit load. */
1812 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
1813 len = 32;
1814 goto one_insert;
1815
1816 case 0xc:
1817 case 0x6:
1818 case 0x3:
1819 /* Effectively a 16-bit load. */
1820 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
1821 len = 16;
1822 goto one_insert;
1823
1824 case 0x8:
1825 case 0x4:
1826 case 0x2:
1827 case 0x1:
1828 /* Effectively an 8-bit load. */
1829 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
1830 len = 8;
1831 goto one_insert;
1832
1833 one_insert:
1834 pos = base + ctz32(m3) * 8;
1835 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
1836 ccm = ((1ull << len) - 1) << pos;
1837 break;
1838
1839 default:
1840 /* This is going to be a sequence of loads and inserts. */
1841 pos = base + 32 - 8;
1842 ccm = 0;
1843 while (m3) {
1844 if (m3 & 0x8) {
1845 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
1846 tcg_gen_addi_i64(o->in2, o->in2, 1);
1847 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
1848 ccm |= 0xffull << pos;
1849 }
1850 m3 = (m3 << 1) & 0xf;
1851 pos -= 8;
1852 }
1853 break;
1854 }
1855
1856 tcg_gen_movi_i64(tmp, ccm);
1857 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
1858 tcg_temp_free_i64(tmp);
1859 return NO_EXIT;
1860 }
1861
1862 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
1863 {
1864 int shift = s->insn->data & 0xff;
1865 int size = s->insn->data >> 8;
1866 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
1867 return NO_EXIT;
1868 }
1869
1870 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
1871 {
1872 TCGv_i64 t1;
1873
1874 gen_op_calc_cc(s);
1875 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
1876
1877 t1 = tcg_temp_new_i64();
1878 tcg_gen_shli_i64(t1, psw_mask, 20);
1879 tcg_gen_shri_i64(t1, t1, 36);
1880 tcg_gen_or_i64(o->out, o->out, t1);
1881
1882 tcg_gen_extu_i32_i64(t1, cc_op);
1883 tcg_gen_shli_i64(t1, t1, 28);
1884 tcg_gen_or_i64(o->out, o->out, t1);
1885 tcg_temp_free_i64(t1);
1886 return NO_EXIT;
1887 }
1888
1889 #ifndef CONFIG_USER_ONLY
1890 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
1891 {
1892 check_privileged(s);
1893 gen_helper_ipte(cpu_env, o->in1, o->in2);
1894 return NO_EXIT;
1895 }
1896
1897 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
1898 {
1899 check_privileged(s);
1900 gen_helper_iske(o->out, cpu_env, o->in2);
1901 return NO_EXIT;
1902 }
1903 #endif
1904
1905 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
1906 {
1907 gen_helper_ldeb(o->out, cpu_env, o->in2);
1908 return NO_EXIT;
1909 }
1910
1911 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
1912 {
1913 gen_helper_ledb(o->out, cpu_env, o->in2);
1914 return NO_EXIT;
1915 }
1916
1917 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
1918 {
1919 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
1920 return NO_EXIT;
1921 }
1922
1923 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
1924 {
1925 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
1926 return NO_EXIT;
1927 }
1928
1929 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
1930 {
1931 gen_helper_lxdb(o->out, cpu_env, o->in2);
1932 return_low128(o->out2);
1933 return NO_EXIT;
1934 }
1935
1936 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
1937 {
1938 gen_helper_lxeb(o->out, cpu_env, o->in2);
1939 return_low128(o->out2);
1940 return NO_EXIT;
1941 }
1942
1943 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
1944 {
1945 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
1946 return NO_EXIT;
1947 }
1948
1949 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
1950 {
1951 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
1952 return NO_EXIT;
1953 }
1954
1955 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
1956 {
1957 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
1958 return NO_EXIT;
1959 }
1960
1961 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
1962 {
1963 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
1964 return NO_EXIT;
1965 }
1966
1967 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
1968 {
1969 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
1970 return NO_EXIT;
1971 }
1972
1973 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
1974 {
1975 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
1976 return NO_EXIT;
1977 }
1978
1979 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
1980 {
1981 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
1982 return NO_EXIT;
1983 }
1984
1985 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
1986 {
1987 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
1988 return NO_EXIT;
1989 }
1990
1991 #ifndef CONFIG_USER_ONLY
1992 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
1993 {
1994 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1995 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1996 check_privileged(s);
1997 potential_page_fault(s);
1998 gen_helper_lctl(cpu_env, r1, o->in2, r3);
1999 tcg_temp_free_i32(r1);
2000 tcg_temp_free_i32(r3);
2001 return NO_EXIT;
2002 }
2003
2004 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2005 {
2006 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2007 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2008 check_privileged(s);
2009 potential_page_fault(s);
2010 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2011 tcg_temp_free_i32(r1);
2012 tcg_temp_free_i32(r3);
2013 return NO_EXIT;
2014 }
2015 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2016 {
2017 check_privileged(s);
2018 potential_page_fault(s);
2019 gen_helper_lra(o->out, cpu_env, o->in2);
2020 set_cc_static(s);
2021 return NO_EXIT;
2022 }
2023
2024 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2025 {
2026 TCGv_i64 t1, t2;
2027
2028 check_privileged(s);
2029
2030 t1 = tcg_temp_new_i64();
2031 t2 = tcg_temp_new_i64();
2032 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2033 tcg_gen_addi_i64(o->in2, o->in2, 4);
2034 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2035 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2036 tcg_gen_shli_i64(t1, t1, 32);
2037 gen_helper_load_psw(cpu_env, t1, t2);
2038 tcg_temp_free_i64(t1);
2039 tcg_temp_free_i64(t2);
2040 return EXIT_NORETURN;
2041 }
2042
2043 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2044 {
2045 TCGv_i64 t1, t2;
2046
2047 check_privileged(s);
2048
2049 t1 = tcg_temp_new_i64();
2050 t2 = tcg_temp_new_i64();
2051 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2052 tcg_gen_addi_i64(o->in2, o->in2, 8);
2053 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2054 gen_helper_load_psw(cpu_env, t1, t2);
2055 tcg_temp_free_i64(t1);
2056 tcg_temp_free_i64(t2);
2057 return EXIT_NORETURN;
2058 }
2059 #endif
2060
2061 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2062 {
2063 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2064 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2065 potential_page_fault(s);
2066 gen_helper_lam(cpu_env, r1, o->in2, r3);
2067 tcg_temp_free_i32(r1);
2068 tcg_temp_free_i32(r3);
2069 return NO_EXIT;
2070 }
2071
2072 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2073 {
2074 int r1 = get_field(s->fields, r1);
2075 int r3 = get_field(s->fields, r3);
2076 TCGv_i64 t = tcg_temp_new_i64();
2077 TCGv_i64 t4 = tcg_const_i64(4);
2078
2079 while (1) {
2080 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2081 store_reg32_i64(r1, t);
2082 if (r1 == r3) {
2083 break;
2084 }
2085 tcg_gen_add_i64(o->in2, o->in2, t4);
2086 r1 = (r1 + 1) & 15;
2087 }
2088
2089 tcg_temp_free_i64(t);
2090 tcg_temp_free_i64(t4);
2091 return NO_EXIT;
2092 }
2093
2094 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2095 {
2096 int r1 = get_field(s->fields, r1);
2097 int r3 = get_field(s->fields, r3);
2098 TCGv_i64 t = tcg_temp_new_i64();
2099 TCGv_i64 t4 = tcg_const_i64(4);
2100
2101 while (1) {
2102 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2103 store_reg32h_i64(r1, t);
2104 if (r1 == r3) {
2105 break;
2106 }
2107 tcg_gen_add_i64(o->in2, o->in2, t4);
2108 r1 = (r1 + 1) & 15;
2109 }
2110
2111 tcg_temp_free_i64(t);
2112 tcg_temp_free_i64(t4);
2113 return NO_EXIT;
2114 }
2115
2116 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2117 {
2118 int r1 = get_field(s->fields, r1);
2119 int r3 = get_field(s->fields, r3);
2120 TCGv_i64 t8 = tcg_const_i64(8);
2121
2122 while (1) {
2123 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2124 if (r1 == r3) {
2125 break;
2126 }
2127 tcg_gen_add_i64(o->in2, o->in2, t8);
2128 r1 = (r1 + 1) & 15;
2129 }
2130
2131 tcg_temp_free_i64(t8);
2132 return NO_EXIT;
2133 }
2134
2135 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2136 {
2137 o->out = o->in2;
2138 o->g_out = o->g_in2;
2139 TCGV_UNUSED_I64(o->in2);
2140 o->g_in2 = false;
2141 return NO_EXIT;
2142 }
2143
2144 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2145 {
2146 o->out = o->in1;
2147 o->out2 = o->in2;
2148 o->g_out = o->g_in1;
2149 o->g_out2 = o->g_in2;
2150 TCGV_UNUSED_I64(o->in1);
2151 TCGV_UNUSED_I64(o->in2);
2152 o->g_in1 = o->g_in2 = false;
2153 return NO_EXIT;
2154 }
2155
2156 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2157 {
2158 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2159 potential_page_fault(s);
2160 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2161 tcg_temp_free_i32(l);
2162 return NO_EXIT;
2163 }
2164
2165 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2166 {
2167 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2168 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2169 potential_page_fault(s);
2170 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2171 tcg_temp_free_i32(r1);
2172 tcg_temp_free_i32(r2);
2173 set_cc_static(s);
2174 return NO_EXIT;
2175 }
2176
2177 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2178 {
2179 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2180 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2181 potential_page_fault(s);
2182 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2183 tcg_temp_free_i32(r1);
2184 tcg_temp_free_i32(r3);
2185 set_cc_static(s);
2186 return NO_EXIT;
2187 }
2188
2189 #ifndef CONFIG_USER_ONLY
2190 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2191 {
2192 int r1 = get_field(s->fields, l1);
2193 check_privileged(s);
2194 potential_page_fault(s);
2195 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2196 set_cc_static(s);
2197 return NO_EXIT;
2198 }
2199
2200 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2201 {
2202 int r1 = get_field(s->fields, l1);
2203 check_privileged(s);
2204 potential_page_fault(s);
2205 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2206 set_cc_static(s);
2207 return NO_EXIT;
2208 }
2209 #endif
2210
2211 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2212 {
2213 potential_page_fault(s);
2214 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2215 set_cc_static(s);
2216 return NO_EXIT;
2217 }
2218
2219 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2220 {
2221 potential_page_fault(s);
2222 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2223 set_cc_static(s);
2224 return_low128(o->in2);
2225 return NO_EXIT;
2226 }
2227
2228 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2229 {
2230 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2231 return NO_EXIT;
2232 }
2233
2234 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2235 {
2236 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
2237 return_low128(o->out2);
2238 return NO_EXIT;
2239 }
2240
2241 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2242 {
2243 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2244 return NO_EXIT;
2245 }
2246
2247 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2248 {
2249 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2250 return NO_EXIT;
2251 }
2252
2253 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2254 {
2255 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2256 return NO_EXIT;
2257 }
2258
2259 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2260 {
2261 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2262 return_low128(o->out2);
2263 return NO_EXIT;
2264 }
2265
2266 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2267 {
2268 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2269 return_low128(o->out2);
2270 return NO_EXIT;
2271 }
2272
2273 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2274 {
2275 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2276 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2277 tcg_temp_free_i64(r3);
2278 return NO_EXIT;
2279 }
2280
2281 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2282 {
2283 int r3 = get_field(s->fields, r3);
2284 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2285 return NO_EXIT;
2286 }
2287
2288 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2289 {
2290 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2291 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2292 tcg_temp_free_i64(r3);
2293 return NO_EXIT;
2294 }
2295
2296 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2297 {
2298 int r3 = get_field(s->fields, r3);
2299 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2300 return NO_EXIT;
2301 }
2302
2303 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2304 {
2305 gen_helper_nabs_i64(o->out, o->in2);
2306 return NO_EXIT;
2307 }
2308
2309 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2310 {
2311 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2312 return NO_EXIT;
2313 }
2314
2315 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2316 {
2317 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2318 return NO_EXIT;
2319 }
2320
2321 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2322 {
2323 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2324 tcg_gen_mov_i64(o->out2, o->in2);
2325 return NO_EXIT;
2326 }
2327
2328 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2329 {
2330 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2331 potential_page_fault(s);
2332 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2333 tcg_temp_free_i32(l);
2334 set_cc_static(s);
2335 return NO_EXIT;
2336 }
2337
2338 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2339 {
2340 tcg_gen_neg_i64(o->out, o->in2);
2341 return NO_EXIT;
2342 }
2343
2344 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2345 {
2346 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2347 return NO_EXIT;
2348 }
2349
2350 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2351 {
2352 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2353 return NO_EXIT;
2354 }
2355
2356 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2357 {
2358 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2359 tcg_gen_mov_i64(o->out2, o->in2);
2360 return NO_EXIT;
2361 }
2362
2363 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2364 {
2365 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2366 potential_page_fault(s);
2367 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2368 tcg_temp_free_i32(l);
2369 set_cc_static(s);
2370 return NO_EXIT;
2371 }
2372
2373 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2374 {
2375 tcg_gen_or_i64(o->out, o->in1, o->in2);
2376 return NO_EXIT;
2377 }
2378
2379 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2380 {
2381 int shift = s->insn->data & 0xff;
2382 int size = s->insn->data >> 8;
2383 uint64_t mask = ((1ull << size) - 1) << shift;
2384
2385 assert(!o->g_in2);
2386 tcg_gen_shli_i64(o->in2, o->in2, shift);
2387 tcg_gen_or_i64(o->out, o->in1, o->in2);
2388
2389 /* Produce the CC from only the bits manipulated. */
2390 tcg_gen_andi_i64(cc_dst, o->out, mask);
2391 set_cc_nz_u64(s, cc_dst);
2392 return NO_EXIT;
2393 }
2394
2395 #ifndef CONFIG_USER_ONLY
2396 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2397 {
2398 check_privileged(s);
2399 gen_helper_ptlb(cpu_env);
2400 return NO_EXIT;
2401 }
2402 #endif
2403
2404 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2405 {
2406 tcg_gen_bswap16_i64(o->out, o->in2);
2407 return NO_EXIT;
2408 }
2409
2410 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2411 {
2412 tcg_gen_bswap32_i64(o->out, o->in2);
2413 return NO_EXIT;
2414 }
2415
2416 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2417 {
2418 tcg_gen_bswap64_i64(o->out, o->in2);
2419 return NO_EXIT;
2420 }
2421
2422 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2423 {
2424 TCGv_i32 t1 = tcg_temp_new_i32();
2425 TCGv_i32 t2 = tcg_temp_new_i32();
2426 TCGv_i32 to = tcg_temp_new_i32();
2427 tcg_gen_trunc_i64_i32(t1, o->in1);
2428 tcg_gen_trunc_i64_i32(t2, o->in2);
2429 tcg_gen_rotl_i32(to, t1, t2);
2430 tcg_gen_extu_i32_i64(o->out, to);
2431 tcg_temp_free_i32(t1);
2432 tcg_temp_free_i32(t2);
2433 tcg_temp_free_i32(to);
2434 return NO_EXIT;
2435 }
2436
2437 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2438 {
2439 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2440 return NO_EXIT;
2441 }
2442
2443 #ifndef CONFIG_USER_ONLY
2444 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2445 {
2446 check_privileged(s);
2447 gen_helper_rrbe(cc_op, cpu_env, o->in2);
2448 set_cc_static(s);
2449 return NO_EXIT;
2450 }
2451
2452 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
2453 {
2454 check_privileged(s);
2455 gen_helper_sacf(cpu_env, o->in2);
2456 /* Addressing mode has changed, so end the block. */
2457 return EXIT_PC_STALE;
2458 }
2459 #endif
2460
2461 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2462 {
2463 int r1 = get_field(s->fields, r1);
2464 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
2465 return NO_EXIT;
2466 }
2467
2468 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2469 {
2470 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2471 return NO_EXIT;
2472 }
2473
2474 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2475 {
2476 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2477 return NO_EXIT;
2478 }
2479
2480 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2481 {
2482 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2483 return_low128(o->out2);
2484 return NO_EXIT;
2485 }
2486
2487 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2488 {
2489 gen_helper_sqeb(o->out, cpu_env, o->in2);
2490 return NO_EXIT;
2491 }
2492
2493 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2494 {
2495 gen_helper_sqdb(o->out, cpu_env, o->in2);
2496 return NO_EXIT;
2497 }
2498
2499 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2500 {
2501 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2502 return_low128(o->out2);
2503 return NO_EXIT;
2504 }
2505
2506 #ifndef CONFIG_USER_ONLY
2507 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
2508 {
2509 check_privileged(s);
2510 potential_page_fault(s);
2511 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
2512 set_cc_static(s);
2513 return NO_EXIT;
2514 }
2515
2516 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2517 {
2518 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2519 check_privileged(s);
2520 potential_page_fault(s);
2521 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2522 tcg_temp_free_i32(r1);
2523 return NO_EXIT;
2524 }
2525 #endif
2526
2527 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
2528 {
2529 uint64_t sign = 1ull << s->insn->data;
2530 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
2531 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
2532 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2533 /* The arithmetic left shift is curious in that it does not affect
2534 the sign bit. Copy that over from the source unchanged. */
2535 tcg_gen_andi_i64(o->out, o->out, ~sign);
2536 tcg_gen_andi_i64(o->in1, o->in1, sign);
2537 tcg_gen_or_i64(o->out, o->out, o->in1);
2538 return NO_EXIT;
2539 }
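/* Worked example for the 32-bit case (s->insn->data == 31, so sign is
   0x80000000): shifting 0x80000001 left by one yields 0x00000002 in the
   magnitude bits, and OR-ing the preserved sign bit back in gives
   0x80000002, which is what SHIFT LEFT SINGLE is defined to produce. */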
2540
2541 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
2542 {
2543 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2544 return NO_EXIT;
2545 }
2546
2547 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
2548 {
2549 tcg_gen_sar_i64(o->out, o->in1, o->in2);
2550 return NO_EXIT;
2551 }
2552
2553 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
2554 {
2555 tcg_gen_shr_i64(o->out, o->in1, o->in2);
2556 return NO_EXIT;
2557 }
2558
2559 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
2560 {
2561 gen_helper_sfpc(cpu_env, o->in2);
2562 return NO_EXIT;
2563 }
2564
2565 #ifndef CONFIG_USER_ONLY
2566 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
2567 {
2568 check_privileged(s);
2569 tcg_gen_shri_i64(o->in2, o->in2, 4);
2570 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
2571 return NO_EXIT;
2572 }
2573
2574 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
2575 {
2576 check_privileged(s);
2577 gen_helper_sske(cpu_env, o->in1, o->in2);
2578 return NO_EXIT;
2579 }
2580
2581 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
2582 {
2583 check_privileged(s);
2584 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
2585 return NO_EXIT;
2586 }
2587
2588 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
2589 {
2590 check_privileged(s);
2591 /* ??? Surely cpu address != cpu number. In any case the previous
2592 version of this stored more than the required half-word, so it
2593 is unlikely this has ever been tested. */
2594 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
2595 return NO_EXIT;
2596 }
2597
2598 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
2599 {
2600 gen_helper_stck(o->out, cpu_env);
2601 /* ??? We don't implement clock states. */
2602 gen_op_movi_cc(s, 0);
2603 return NO_EXIT;
2604 }
2605
2606 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
2607 {
2608 TCGv_i64 c1 = tcg_temp_new_i64();
2609 TCGv_i64 c2 = tcg_temp_new_i64();
2610 gen_helper_stck(c1, cpu_env);
2611 /* Shift the 64-bit value into its place as a zero-extended
2612 104-bit value. Note that "bit positions 64-103 are always
2613 non-zero so that they compare differently to STCK"; we set
2614 the least significant bit to 1. */
2615 tcg_gen_shli_i64(c2, c1, 56);
2616 tcg_gen_shri_i64(c1, c1, 8);
2617 tcg_gen_ori_i64(c2, c2, 0x10000);
2618 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
2619 tcg_gen_addi_i64(o->in2, o->in2, 8);
2620 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
2621 tcg_temp_free_i64(c1);
2622 tcg_temp_free_i64(c2);
2623 /* ??? We don't implement clock states. */
2624 gen_op_movi_cc(s, 0);
2625 return NO_EXIT;
2626 }
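/* Sketch of the 16 bytes stored above, assuming gen_helper_stck returns
   the 64-bit TOD value: the first doubleword holds c1 >> 8 (a zero epoch
   byte followed by TOD bits 0-55), the second holds the remaining TOD
   bits in its top byte plus the constant 0x10000 noted above. */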
2627
2628 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
2629 {
2630 check_privileged(s);
2631 gen_helper_sckc(cpu_env, o->in2);
2632 return NO_EXIT;
2633 }
2634
2635 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
2636 {
2637 check_privileged(s);
2638 gen_helper_stckc(o->out, cpu_env);
2639 return NO_EXIT;
2640 }
2641
2642 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
2643 {
2644 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2645 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2646 check_privileged(s);
2647 potential_page_fault(s);
2648 gen_helper_stctg(cpu_env, r1, o->in2, r3);
2649 tcg_temp_free_i32(r1);
2650 tcg_temp_free_i32(r3);
2651 return NO_EXIT;
2652 }
2653
2654 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
2655 {
2656 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2657 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2658 check_privileged(s);
2659 potential_page_fault(s);
2660 gen_helper_stctl(cpu_env, r1, o->in2, r3);
2661 tcg_temp_free_i32(r1);
2662 tcg_temp_free_i32(r3);
2663 return NO_EXIT;
2664 }
2665
2666 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
2667 {
2668 check_privileged(s);
2669 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
2670 return NO_EXIT;
2671 }
2672
2673 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
2674 {
2675 check_privileged(s);
2676 gen_helper_spt(cpu_env, o->in2);
2677 return NO_EXIT;
2678 }
2679
2680 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
2681 {
2682 TCGv_i64 f, a;
2683 /* We really ought to have a more complete indication of the facilities
2684 that we implement. Address this when STFLE is implemented. */
2685 check_privileged(s);
2686 f = tcg_const_i64(0xc0000000);
2687 a = tcg_const_i64(200);
2688 tcg_gen_qemu_st32(f, a, get_mem_index(s));
2689 tcg_temp_free_i64(f);
2690 tcg_temp_free_i64(a);
2691 return NO_EXIT;
2692 }
2693
2694 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
2695 {
2696 check_privileged(s);
2697 gen_helper_stpt(o->out, cpu_env);
2698 return NO_EXIT;
2699 }
2700
2701 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
2702 {
2703 check_privileged(s);
2704 potential_page_fault(s);
2705 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
2706 set_cc_static(s);
2707 return NO_EXIT;
2708 }
2709
2710 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
2711 {
2712 check_privileged(s);
2713 gen_helper_spx(cpu_env, o->in2);
2714 return NO_EXIT;
2715 }
2716
2717 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
2718 {
2719 check_privileged(s);
2720 /* Not operational. */
2721 gen_op_movi_cc(s, 3);
2722 return NO_EXIT;
2723 }
2724
2725 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
2726 {
2727 check_privileged(s);
2728 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
2729 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
2730 return NO_EXIT;
2731 }
2732
2733 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
2734 {
2735 uint64_t i2 = get_field(s->fields, i2);
2736 TCGv_i64 t;
2737
2738 check_privileged(s);
2739
2740 /* It is important to do what the instruction name says: STORE THEN.
2741 If we let the output hook perform the store, then a fault and
2742 restart would leave the wrong SYSTEM MASK in place. */
2743 t = tcg_temp_new_i64();
2744 tcg_gen_shri_i64(t, psw_mask, 56);
2745 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
2746 tcg_temp_free_i64(t);
2747
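/* Opcode 0xac is STORE THEN AND SYSTEM MASK; the OR branch below handles
   STORE THEN OR SYSTEM MASK, the only other user of this helper. */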
2748 if (s->fields->op == 0xac) {
2749 tcg_gen_andi_i64(psw_mask, psw_mask,
2750 (i2 << 56) | 0x00ffffffffffffffull);
2751 } else {
2752 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
2753 }
2754 return NO_EXIT;
2755 }
2756
2757 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
2758 {
2759 check_privileged(s);
2760 potential_page_fault(s);
2761 gen_helper_stura(cpu_env, o->in2, o->in1);
2762 return NO_EXIT;
2763 }
2764 #endif
2765
2766 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
2767 {
2768 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
2769 return NO_EXIT;
2770 }
2771
2772 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
2773 {
2774 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
2775 return NO_EXIT;
2776 }
2777
2778 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
2779 {
2780 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
2781 return NO_EXIT;
2782 }
2783
2784 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
2785 {
2786 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
2787 return NO_EXIT;
2788 }
2789
2790 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
2791 {
2792 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2793 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2794 potential_page_fault(s);
2795 gen_helper_stam(cpu_env, r1, o->in2, r3);
2796 tcg_temp_free_i32(r1);
2797 tcg_temp_free_i32(r3);
2798 return NO_EXIT;
2799 }
2800
2801 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
2802 {
2803 int m3 = get_field(s->fields, m3);
2804 int pos, base = s->insn->data;
2805 TCGv_i64 tmp = tcg_temp_new_i64();
2806
2807 pos = base + ctz32(m3) * 8;
2808 switch (m3) {
2809 case 0xf:
2810 /* Effectively a 32-bit store. */
2811 tcg_gen_shri_i64(tmp, o->in1, pos);
2812 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
2813 break;
2814
2815 case 0xc:
2816 case 0x6:
2817 case 0x3:
2818 /* Effectively a 16-bit store. */
2819 tcg_gen_shri_i64(tmp, o->in1, pos);
2820 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
2821 break;
2822
2823 case 0x8:
2824 case 0x4:
2825 case 0x2:
2826 case 0x1:
2827 /* Effectively an 8-bit store. */
2828 tcg_gen_shri_i64(tmp, o->in1, pos);
2829 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
2830 break;
2831
2832 default:
2833 /* This is going to be a sequence of shifts and stores. */
2834 pos = base + 32 - 8;
2835 while (m3) {
2836 if (m3 & 0x8) {
2837 tcg_gen_shri_i64(tmp, o->in1, pos);
2838 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
2839 tcg_gen_addi_i64(o->in2, o->in2, 1);
2840 }
2841 m3 = (m3 << 1) & 0xf;
2842 pos -= 8;
2843 }
2844 break;
2845 }
2846 tcg_temp_free_i64(tmp);
2847 return NO_EXIT;
2848 }
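/* Worked example for the non-contiguous path: with base == 0 and m3 == 0x5,
   the loop starts at pos == 24, skips the high byte (mask bit 0x8 clear),
   stores bits 16-23 of r1, advances the address, skips bits 8-15, and then
   stores bits 0-7 -- so the second and fourth bytes of the low word end up
   in two adjacent bytes of storage. */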
2849
2850 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
2851 {
2852 int r1 = get_field(s->fields, r1);
2853 int r3 = get_field(s->fields, r3);
2854 int size = s->insn->data;
2855 TCGv_i64 tsize = tcg_const_i64(size);
2856
2857 while (1) {
2858 if (size == 8) {
2859 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
2860 } else {
2861 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
2862 }
2863 if (r1 == r3) {
2864 break;
2865 }
2866 tcg_gen_add_i64(o->in2, o->in2, tsize);
2867 r1 = (r1 + 1) & 15;
2868 }
2869
2870 tcg_temp_free_i64(tsize);
2871 return NO_EXIT;
2872 }
2873
2874 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
2875 {
2876 int r1 = get_field(s->fields, r1);
2877 int r3 = get_field(s->fields, r3);
2878 TCGv_i64 t = tcg_temp_new_i64();
2879 TCGv_i64 t4 = tcg_const_i64(4);
2880 TCGv_i64 t32 = tcg_const_i64(32);
2881
2882 while (1) {
2883 tcg_gen_shl_i64(t, regs[r1], t32);
2884 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
2885 if (r1 == r3) {
2886 break;
2887 }
2888 tcg_gen_add_i64(o->in2, o->in2, t4);
2889 r1 = (r1 + 1) & 15;
2890 }
2891
2892 tcg_temp_free_i64(t);
2893 tcg_temp_free_i64(t4);
2894 tcg_temp_free_i64(t32);
2895 return NO_EXIT;
2896 }
2897
2898 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
2899 {
2900 potential_page_fault(s);
2901 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2902 set_cc_static(s);
2903 return_low128(o->in2);
2904 return NO_EXIT;
2905 }
2906
2907 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
2908 {
2909 tcg_gen_sub_i64(o->out, o->in1, o->in2);
2910 return NO_EXIT;
2911 }
2912
2913 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
2914 {
2915 TCGv_i64 cc;
2916
2917 assert(!o->g_in2);
2918 tcg_gen_not_i64(o->in2, o->in2);
2919 tcg_gen_add_i64(o->out, o->in1, o->in2);
2920
2921 /* XXX possible optimization point */
2922 gen_op_calc_cc(s);
2923 cc = tcg_temp_new_i64();
2924 tcg_gen_extu_i32_i64(cc, cc_op);
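/* CC values 2 and 3 indicate "no borrow", so bit 1 of the CC is the
   carry-in to add alongside in1 + ~in2. */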
2925 tcg_gen_shri_i64(cc, cc, 1);
2926 tcg_gen_add_i64(o->out, o->out, cc);
2927 tcg_temp_free_i64(cc);
2928 return NO_EXIT;
2929 }
2930
2931 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
2932 {
2933 TCGv_i32 t;
2934
2935 update_psw_addr(s);
2936 update_cc_op(s);
2937
2938 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
2939 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
2940 tcg_temp_free_i32(t);
2941
2942 t = tcg_const_i32(s->next_pc - s->pc);
2943 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
2944 tcg_temp_free_i32(t);
2945
2946 gen_exception(EXCP_SVC);
2947 return EXIT_NORETURN;
2948 }
2949
2950 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
2951 {
2952 gen_helper_tceb(cc_op, o->in1, o->in2);
2953 set_cc_static(s);
2954 return NO_EXIT;
2955 }
2956
2957 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
2958 {
2959 gen_helper_tcdb(cc_op, o->in1, o->in2);
2960 set_cc_static(s);
2961 return NO_EXIT;
2962 }
2963
2964 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
2965 {
2966 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
2967 set_cc_static(s);
2968 return NO_EXIT;
2969 }
2970
2971 #ifndef CONFIG_USER_ONLY
2972 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
2973 {
2974 potential_page_fault(s);
2975 gen_helper_tprot(cc_op, o->addr1, o->in2);
2976 set_cc_static(s);
2977 return NO_EXIT;
2978 }
2979 #endif
2980
2981 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
2982 {
2983 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2984 potential_page_fault(s);
2985 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
2986 tcg_temp_free_i32(l);
2987 set_cc_static(s);
2988 return NO_EXIT;
2989 }
2990
2991 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
2992 {
2993 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2994 potential_page_fault(s);
2995 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
2996 tcg_temp_free_i32(l);
2997 return NO_EXIT;
2998 }
2999
3000 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3001 {
3002 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3003 potential_page_fault(s);
3004 gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
3005 tcg_temp_free_i32(l);
3006 set_cc_static(s);
3007 return NO_EXIT;
3008 }
3009
3010 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3011 {
3012 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3013 return NO_EXIT;
3014 }
3015
3016 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3017 {
3018 int shift = s->insn->data & 0xff;
3019 int size = s->insn->data >> 8;
3020 uint64_t mask = ((1ull << size) - 1) << shift;
3021
3022 assert(!o->g_in2);
3023 tcg_gen_shli_i64(o->in2, o->in2, shift);
3024 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3025
3026 /* Produce the CC from only the bits manipulated. */
3027 tcg_gen_andi_i64(cc_dst, o->out, mask);
3028 set_cc_nz_u64(s, cc_dst);
3029 return NO_EXIT;
3030 }
3031
3032 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3033 {
3034 o->out = tcg_const_i64(0);
3035 return NO_EXIT;
3036 }
3037
3038 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3039 {
3040 o->out = tcg_const_i64(0);
3041 o->out2 = o->out;
3042 o->g_out2 = true;
3043 return NO_EXIT;
3044 }
3045
3046 /* ====================================================================== */
3047 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3048 the original inputs), update the various cc data structures in order to
3049 be able to compute the new condition code. */
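/* For example, once an add operation has produced o->out, cout_adds32 below
   records CC_OP_ADD_32 together with in1, in2 and out; the condition code
   itself is only materialized later, when something actually consumes it. */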
3050
3051 static void cout_abs32(DisasContext *s, DisasOps *o)
3052 {
3053 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3054 }
3055
3056 static void cout_abs64(DisasContext *s, DisasOps *o)
3057 {
3058 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3059 }
3060
3061 static void cout_adds32(DisasContext *s, DisasOps *o)
3062 {
3063 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3064 }
3065
3066 static void cout_adds64(DisasContext *s, DisasOps *o)
3067 {
3068 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3069 }
3070
3071 static void cout_addu32(DisasContext *s, DisasOps *o)
3072 {
3073 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3074 }
3075
3076 static void cout_addu64(DisasContext *s, DisasOps *o)
3077 {
3078 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3079 }
3080
3081 static void cout_addc32(DisasContext *s, DisasOps *o)
3082 {
3083 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3084 }
3085
3086 static void cout_addc64(DisasContext *s, DisasOps *o)
3087 {
3088 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3089 }
3090
3091 static void cout_cmps32(DisasContext *s, DisasOps *o)
3092 {
3093 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3094 }
3095
3096 static void cout_cmps64(DisasContext *s, DisasOps *o)
3097 {
3098 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3099 }
3100
3101 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3102 {
3103 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3104 }
3105
3106 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3107 {
3108 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3109 }
3110
3111 static void cout_f32(DisasContext *s, DisasOps *o)
3112 {
3113 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3114 }
3115
3116 static void cout_f64(DisasContext *s, DisasOps *o)
3117 {
3118 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3119 }
3120
3121 static void cout_f128(DisasContext *s, DisasOps *o)
3122 {
3123 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3124 }
3125
3126 static void cout_nabs32(DisasContext *s, DisasOps *o)
3127 {
3128 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3129 }
3130
3131 static void cout_nabs64(DisasContext *s, DisasOps *o)
3132 {
3133 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3134 }
3135
3136 static void cout_neg32(DisasContext *s, DisasOps *o)
3137 {
3138 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3139 }
3140
3141 static void cout_neg64(DisasContext *s, DisasOps *o)
3142 {
3143 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
3144 }
3145
3146 static void cout_nz32(DisasContext *s, DisasOps *o)
3147 {
3148 tcg_gen_ext32u_i64(cc_dst, o->out);
3149 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3150 }
3151
3152 static void cout_nz64(DisasContext *s, DisasOps *o)
3153 {
3154 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3155 }
3156
3157 static void cout_s32(DisasContext *s, DisasOps *o)
3158 {
3159 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3160 }
3161
3162 static void cout_s64(DisasContext *s, DisasOps *o)
3163 {
3164 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3165 }
3166
3167 static void cout_subs32(DisasContext *s, DisasOps *o)
3168 {
3169 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3170 }
3171
3172 static void cout_subs64(DisasContext *s, DisasOps *o)
3173 {
3174 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3175 }
3176
3177 static void cout_subu32(DisasContext *s, DisasOps *o)
3178 {
3179 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3180 }
3181
3182 static void cout_subu64(DisasContext *s, DisasOps *o)
3183 {
3184 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3185 }
3186
3187 static void cout_subb32(DisasContext *s, DisasOps *o)
3188 {
3189 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3190 }
3191
3192 static void cout_subb64(DisasContext *s, DisasOps *o)
3193 {
3194 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3195 }
3196
3197 static void cout_tm32(DisasContext *s, DisasOps *o)
3198 {
3199 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3200 }
3201
3202 static void cout_tm64(DisasContext *s, DisasOps *o)
3203 {
3204 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3205 }
3206
3207 /* ====================================================================== */
3208 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3209 with the TCG register to which we will write. Used in combination with
3210 the "wout" generators, in some cases we need a new temporary, and in
3211 some cases we can write to a TCG global. */
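/* For example, prep_new below hands the operation a fresh temporary that a
   "wout" generator later copies to its final destination, while prep_r1
   installs the TCG global regs[r1] directly and sets g_out so that
   translate_one will not free it. */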
3212
3213 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3214 {
3215 o->out = tcg_temp_new_i64();
3216 }
3217
3218 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3219 {
3220 o->out = tcg_temp_new_i64();
3221 o->out2 = tcg_temp_new_i64();
3222 }
3223
3224 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3225 {
3226 o->out = regs[get_field(f, r1)];
3227 o->g_out = true;
3228 }
3229
3230 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3231 {
3232 /* ??? Specification exception: r1 must be even. */
3233 int r1 = get_field(f, r1);
3234 o->out = regs[r1];
3235 o->out2 = regs[(r1 + 1) & 15];
3236 o->g_out = o->g_out2 = true;
3237 }
3238
3239 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3240 {
3241 o->out = fregs[get_field(f, r1)];
3242 o->g_out = true;
3243 }
3244
3245 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3246 {
3247 /* ??? Specification exception: r1 must be < 14. */
3248 int r1 = get_field(f, r1);
3249 o->out = fregs[r1];
3250 o->out2 = fregs[(r1 + 2) & 15];
3251 o->g_out = o->g_out2 = true;
3252 }
3253
3254 /* ====================================================================== */
3255 /* The "Write OUTput" generators. These generally perform some non-trivial
3256 copy of data to TCG globals, or to main memory. The trivial cases are
3257 generally handled by having a "prep" generator install the TCG global
3258 as the destination of the operation. */
3259
3260 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3261 {
3262 store_reg(get_field(f, r1), o->out);
3263 }
3264
3265 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3266 {
3267 int r1 = get_field(f, r1);
3268 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3269 }
3270
3271 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3272 {
3273 int r1 = get_field(f, r1);
3274 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3275 }
3276
3277 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3278 {
3279 store_reg32_i64(get_field(f, r1), o->out);
3280 }
3281
3282 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3283 {
3284 /* ??? Specification exception: r1 must be even. */
3285 int r1 = get_field(f, r1);
3286 store_reg32_i64(r1, o->out);
3287 store_reg32_i64((r1 + 1) & 15, o->out2);
3288 }
3289
3290 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3291 {
3292 /* ??? Specification exception: r1 must be even. */
3293 int r1 = get_field(f, r1);
3294 store_reg32_i64((r1 + 1) & 15, o->out);
3295 tcg_gen_shri_i64(o->out, o->out, 32);
3296 store_reg32_i64(r1, o->out);
3297 }
3298
3299 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3300 {
3301 store_freg32_i64(get_field(f, r1), o->out);
3302 }
3303
3304 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3305 {
3306 store_freg(get_field(f, r1), o->out);
3307 }
3308
3309 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3310 {
3311 /* ??? Specification exception: r1 must be < 14. */
3312 int f1 = get_field(s->fields, r1);
3313 store_freg(f1, o->out);
3314 store_freg((f1 + 2) & 15, o->out2);
3315 }
3316
3317 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3318 {
3319 if (get_field(f, r1) != get_field(f, r2)) {
3320 store_reg32_i64(get_field(f, r1), o->out);
3321 }
3322 }
3323
3324 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3325 {
3326 if (get_field(f, r1) != get_field(f, r2)) {
3327 store_freg32_i64(get_field(f, r1), o->out);
3328 }
3329 }
3330
3331 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3332 {
3333 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
3334 }
3335
3336 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3337 {
3338 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
3339 }
3340
3341 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3342 {
3343 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3344 }
3345
3346 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3347 {
3348 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3349 }
3350
3351 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3352 {
3353 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
3354 }
3355
3356 /* ====================================================================== */
3357 /* The "INput 1" generators. These load the first operand to an insn. */
3358
3359 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3360 {
3361 o->in1 = load_reg(get_field(f, r1));
3362 }
3363
3364 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3365 {
3366 o->in1 = regs[get_field(f, r1)];
3367 o->g_in1 = true;
3368 }
3369
3370 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3371 {
3372 o->in1 = tcg_temp_new_i64();
3373 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
3374 }
3375
3376 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3377 {
3378 o->in1 = tcg_temp_new_i64();
3379 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
3380 }
3381
3382 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
3383 {
3384 o->in1 = tcg_temp_new_i64();
3385 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
3386 }
3387
3388 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3389 {
3390 /* ??? Specification exception: r1 must be even. */
3391 int r1 = get_field(f, r1);
3392 o->in1 = load_reg((r1 + 1) & 15);
3393 }
3394
3395 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3396 {
3397 /* ??? Specification exception: r1 must be even. */
3398 int r1 = get_field(f, r1);
3399 o->in1 = tcg_temp_new_i64();
3400 tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
3401 }
3402
3403 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3404 {
3405 /* ??? Specification exception: r1 must be even. */
3406 int r1 = get_field(f, r1);
3407 o->in1 = tcg_temp_new_i64();
3408 tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
3409 }
3410
3411 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3412 {
3413 /* ??? Specification exception: r1 must be even. */
3414 int r1 = get_field(f, r1);
3415 o->in1 = tcg_temp_new_i64();
3416 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3417 }
3418
3419 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3420 {
3421 o->in1 = load_reg(get_field(f, r2));
3422 }
3423
3424 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3425 {
3426 o->in1 = load_reg(get_field(f, r3));
3427 }
3428
3429 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
3430 {
3431 o->in1 = regs[get_field(f, r3)];
3432 o->g_in1 = true;
3433 }
3434
3435 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3436 {
3437 o->in1 = tcg_temp_new_i64();
3438 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
3439 }
3440
3441 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3442 {
3443 o->in1 = tcg_temp_new_i64();
3444 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
3445 }
3446
3447 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3448 {
3449 o->in1 = load_freg32_i64(get_field(f, r1));
3450 }
3451
3452 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3453 {
3454 o->in1 = fregs[get_field(f, r1)];
3455 o->g_in1 = true;
3456 }
3457
3458 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3459 {
3460 /* ??? Specification exception: r1 must be < 14. */
3461 int r1 = get_field(f, r1);
3462 o->out = fregs[r1];
3463 o->out2 = fregs[(r1 + 2) & 15];
3464 o->g_out = o->g_out2 = true;
3465 }
3466
3467 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
3468 {
3469 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
3470 }
3471
3472 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
3473 {
3474 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3475 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
3476 }
3477
3478 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3479 {
3480 in1_la1(s, f, o);
3481 o->in1 = tcg_temp_new_i64();
3482 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
3483 }
3484
3485 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3486 {
3487 in1_la1(s, f, o);
3488 o->in1 = tcg_temp_new_i64();
3489 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
3490 }
3491
3492 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3493 {
3494 in1_la1(s, f, o);
3495 o->in1 = tcg_temp_new_i64();
3496 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
3497 }
3498
3499 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3500 {
3501 in1_la1(s, f, o);
3502 o->in1 = tcg_temp_new_i64();
3503 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
3504 }
3505
3506 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3507 {
3508 in1_la1(s, f, o);
3509 o->in1 = tcg_temp_new_i64();
3510 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
3511 }
3512
3513 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3514 {
3515 in1_la1(s, f, o);
3516 o->in1 = tcg_temp_new_i64();
3517 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
3518 }
3519
3520 /* ====================================================================== */
3521 /* The "INput 2" generators. These load the second operand to an insn. */
3522
3523 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3524 {
3525 o->in2 = regs[get_field(f, r1)];
3526 o->g_in2 = true;
3527 }
3528
3529 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3530 {
3531 o->in2 = tcg_temp_new_i64();
3532 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
3533 }
3534
3535 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3536 {
3537 o->in2 = tcg_temp_new_i64();
3538 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
3539 }
3540
3541 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3542 {
3543 o->in2 = load_reg(get_field(f, r2));
3544 }
3545
3546 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3547 {
3548 o->in2 = regs[get_field(f, r2)];
3549 o->g_in2 = true;
3550 }
3551
3552 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
3553 {
3554 int r2 = get_field(f, r2);
3555 if (r2 != 0) {
3556 o->in2 = load_reg(r2);
3557 }
3558 }
3559
3560 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
3561 {
3562 o->in2 = tcg_temp_new_i64();
3563 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
3564 }
3565
3566 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3567 {
3568 o->in2 = tcg_temp_new_i64();
3569 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
3570 }
3571
3572 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3573 {
3574 o->in2 = tcg_temp_new_i64();
3575 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
3576 }
3577
3578 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3579 {
3580 o->in2 = tcg_temp_new_i64();
3581 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
3582 }
3583
3584 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3585 {
3586 o->in2 = load_reg(get_field(f, r3));
3587 }
3588
3589 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3590 {
3591 o->in2 = tcg_temp_new_i64();
3592 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
3593 }
3594
3595 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3596 {
3597 o->in2 = tcg_temp_new_i64();
3598 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
3599 }
3600
3601 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
3602 {
3603 o->in2 = load_freg32_i64(get_field(f, r2));
3604 }
3605
3606 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3607 {
3608 o->in2 = fregs[get_field(f, r2)];
3609 o->g_in2 = true;
3610 }
3611
3612 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3613 {
3614 /* ??? Specification exception: r1 must be < 14. */
3615 int r2 = get_field(f, r2);
3616 o->in1 = fregs[r2];
3617 o->in2 = fregs[(r2 + 2) & 15];
3618 o->g_in1 = o->g_in2 = true;
3619 }
3620
3621 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
3622 {
3623 o->in2 = get_address(s, 0, get_field(f, r2), 0);
3624 }
3625
3626 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
3627 {
3628 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3629 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
3630 }
3631
3632 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
3633 {
3634 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
3635 }
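/* Relative-immediate operands are counted in halfwords, hence the factor
   of two above; the mri2_* loaders below reuse this address for their
   PC-relative memory accesses. */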
3636
3637 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
3638 {
3639 help_l2_shift(s, f, o, 31);
3640 }
3641
3642 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
3643 {
3644 help_l2_shift(s, f, o, 63);
3645 }
3646
3647 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3648 {
3649 in2_a2(s, f, o);
3650 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
3651 }
3652
3653 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3654 {
3655 in2_a2(s, f, o);
3656 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
3657 }
3658
3659 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3660 {
3661 in2_a2(s, f, o);
3662 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
3663 }
3664
3665 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3666 {
3667 in2_a2(s, f, o);
3668 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
3669 }
3670
3671 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3672 {
3673 in2_a2(s, f, o);
3674 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
3675 }
3676
3677 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
3678 {
3679 in2_a2(s, f, o);
3680 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
3681 }
3682
3683 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3684 {
3685 in2_ri2(s, f, o);
3686 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
3687 }
3688
3689 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3690 {
3691 in2_ri2(s, f, o);
3692 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
3693 }
3694
3695 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3696 {
3697 in2_ri2(s, f, o);
3698 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
3699 }
3700
3701 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
3702 {
3703 in2_ri2(s, f, o);
3704 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
3705 }
3706
3707 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
3708 {
3709 o->in2 = tcg_const_i64(get_field(f, i2));
3710 }
3711
3712 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3713 {
3714 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
3715 }
3716
3717 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3718 {
3719 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
3720 }
3721
3722 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3723 {
3724 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
3725 }
3726
3727 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3728 {
3729 uint64_t i2 = (uint16_t)get_field(f, i2);
3730 o->in2 = tcg_const_i64(i2 << s->insn->data);
3731 }
3732
3733 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3734 {
3735 uint64_t i2 = (uint32_t)get_field(f, i2);
3736 o->in2 = tcg_const_i64(i2 << s->insn->data);
3737 }
3738
3739 /* ====================================================================== */
3740
3741 /* Find opc within the table of insns. This is formulated as a switch
3742 statement so that (1) we get compile-time notice of cut-paste errors
3743 for duplicated opcodes, and (2) the compiler generates the binary
3744 search tree, rather than us having to post-process the table. */
3745
3746 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3747 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3748
3749 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3750
3751 enum DisasInsnEnum {
3752 #include "insn-data.def"
3753 };
3754
3755 #undef D
3756 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3757 .opc = OPC, \
3758 .fmt = FMT_##FT, \
3759 .fac = FAC_##FC, \
3760 .name = #NM, \
3761 .help_in1 = in1_##I1, \
3762 .help_in2 = in2_##I2, \
3763 .help_prep = prep_##P, \
3764 .help_wout = wout_##W, \
3765 .help_cout = cout_##CC, \
3766 .help_op = op_##OP, \
3767 .data = D \
3768 },
3769
3770 /* Allow 0 to be used for NULL in the table below. */
3771 #define in1_0 NULL
3772 #define in2_0 NULL
3773 #define prep_0 NULL
3774 #define wout_0 NULL
3775 #define cout_0 NULL
3776 #define op_0 NULL
3777
3778 static const DisasInsn insn_info[] = {
3779 #include "insn-data.def"
3780 };
3781
3782 #undef D
3783 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3784 case OPC: return &insn_info[insn_ ## NM];
3785
3786 static const DisasInsn *lookup_opc(uint16_t opc)
3787 {
3788 switch (opc) {
3789 #include "insn-data.def"
3790 default:
3791 return NULL;
3792 }
3793 }
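/* To illustrate the macro machinery: a hypothetical insn-data.def entry
   C(0x1234, FOO, ...) expands, across the three includes of the file, into
   the enumerator insn_FOO, an insn_info[] initializer with .opc = 0x1234,
   .name = "FOO" and the in1_/in2_/prep_/wout_/cout_/op_ helpers spliced in
   by token pasting, and a "case 0x1234: return &insn_info[insn_FOO];" arm
   in the switch above. */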
3794
3795 #undef D
3796 #undef C
3797
3798 /* Extract a field from the insn. The INSN should be left-aligned in
3799 the uint64_t so that we can more easily utilize the big-bit-endian
3800 definitions we extract from the Principles of Operation. */
3801
3802 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
3803 {
3804 uint32_t r, m;
3805
3806 if (f->size == 0) {
3807 return;
3808 }
3809
3810 /* Zero extract the field from the insn. */
3811 r = (insn << f->beg) >> (64 - f->size);
3812
3813 /* Sign-extend, or un-swap the field as necessary. */
3814 switch (f->type) {
3815 case 0: /* unsigned */
3816 break;
3817 case 1: /* signed */
3818 assert(f->size <= 32);
3819 m = 1u << (f->size - 1);
3820 r = (r ^ m) - m;
3821 break;
3822 case 2: /* dl+dh split, signed 20 bit. */
3823 r = ((int8_t)r << 12) | (r >> 8);
3824 break;
3825 default:
3826 abort();
3827 }
3828
3829 /* Validate that the "compressed" encoding we selected above is valid.
3830 I.e. we haven't made two different original fields overlap. */
3831 assert(((o->presentC >> f->indexC) & 1) == 0);
3832 o->presentC |= 1 << f->indexC;
3833 o->presentO |= 1 << f->indexO;
3834
3835 o->c[f->indexC] = r;
3836 }
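/* Worked example: a 4-bit register field that begins at instruction bit 8
   comes out of the left-aligned doubleword as (insn << 8) >> 60. For the
   split 20-bit displacement (type 2), the raw extraction yields DL:DH and
   the fixup above reassembles it as the sign-extended value DH:DL. */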
3837
3838 /* Look up the insn at the current PC, extracting the operands into O and
3839 returning the info struct for the insn. Returns NULL for invalid insn. */
3840
3841 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
3842 DisasFields *f)
3843 {
3844 uint64_t insn, pc = s->pc;
3845 int op, op2, ilen;
3846 const DisasInsn *info;
3847
3848 insn = ld_code2(env, pc);
3849 op = (insn >> 8) & 0xff;
3850 ilen = get_ilen(op);
3851 s->next_pc = s->pc + ilen;
3852
3853 switch (ilen) {
3854 case 2:
3855 insn = insn << 48;
3856 break;
3857 case 4:
3858 insn = ld_code4(env, pc) << 32;
3859 break;
3860 case 6:
3861 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
3862 break;
3863 default:
3864 abort();
3865 }
3866
3867 /* We can't actually determine the insn format until we've looked up
3868 the full insn opcode, which we can't do without locating the
3869 secondary opcode. Assume by default that OP2 is at bit 40; for
3870 those smaller insns that don't actually have a secondary opcode
3871 this will correctly result in OP2 = 0. */
3872 switch (op) {
3873 case 0x01: /* E */
3874 case 0x80: /* S */
3875 case 0x82: /* S */
3876 case 0x93: /* S */
3877 case 0xb2: /* S, RRF, RRE */
3878 case 0xb3: /* RRE, RRD, RRF */
3879 case 0xb9: /* RRE, RRF */
3880 case 0xe5: /* SSE, SIL */
3881 op2 = (insn << 8) >> 56;
3882 break;
3883 case 0xa5: /* RI */
3884 case 0xa7: /* RI */
3885 case 0xc0: /* RIL */
3886 case 0xc2: /* RIL */
3887 case 0xc4: /* RIL */
3888 case 0xc6: /* RIL */
3889 case 0xc8: /* SSF */
3890 case 0xcc: /* RIL */
3891 op2 = (insn << 12) >> 60;
3892 break;
3893 case 0xd0 ... 0xdf: /* SS */
3894 case 0xe1: /* SS */
3895 case 0xe2: /* SS */
3896 case 0xe8: /* SS */
3897 case 0xe9: /* SS */
3898 case 0xea: /* SS */
3899 case 0xee ... 0xf3: /* SS */
3900 case 0xf8 ... 0xfd: /* SS */
3901 op2 = 0;
3902 break;
3903 default:
3904 op2 = (insn << 40) >> 56;
3905 break;
3906 }
3907
3908 memset(f, 0, sizeof(*f));
3909 f->op = op;
3910 f->op2 = op2;
3911
3912 /* Look up the instruction. */
3913 info = lookup_opc(op << 8 | op2);
3914
3915 /* If we found it, extract the operands. */
3916 if (info != NULL) {
3917 DisasFormat fmt = info->fmt;
3918 int i;
3919
3920 for (i = 0; i < NUM_C_FIELD; ++i) {
3921 extract_field(f, &format_info[fmt].op[i], insn);
3922 }
3923 }
3924 return info;
3925 }
3926
3927 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
3928 {
3929 const DisasInsn *insn;
3930 ExitStatus ret = NO_EXIT;
3931 DisasFields f;
3932 DisasOps o;
3933
3934 /* Search for the insn in the table. */
3935 insn = extract_insn(env, s, &f);
3936
3937 /* Not found means unimplemented/illegal opcode. */
3938 if (insn == NULL) {
3939 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
3940 f.op, f.op2);
3941 gen_illegal_opcode(s);
3942 return EXIT_NORETURN;
3943 }
3944
3945 /* Set up the structures we use to communicate with the helpers. */
3946 s->insn = insn;
3947 s->fields = &f;
3948 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
3949 TCGV_UNUSED_I64(o.out);
3950 TCGV_UNUSED_I64(o.out2);
3951 TCGV_UNUSED_I64(o.in1);
3952 TCGV_UNUSED_I64(o.in2);
3953 TCGV_UNUSED_I64(o.addr1);
3954
3955 /* Implement the instruction. */
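/* The hooks run in a fixed order -- in1, in2, prep, op, wout, cout -- so
   the operation can rely on its inputs and output temporaries being in
   place, and the CC update always sees the final result. */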
3956 if (insn->help_in1) {
3957 insn->help_in1(s, &f, &o);
3958 }
3959 if (insn->help_in2) {
3960 insn->help_in2(s, &f, &o);
3961 }
3962 if (insn->help_prep) {
3963 insn->help_prep(s, &f, &o);
3964 }
3965 if (insn->help_op) {
3966 ret = insn->help_op(s, &o);
3967 }
3968 if (insn->help_wout) {
3969 insn->help_wout(s, &f, &o);
3970 }
3971 if (insn->help_cout) {
3972 insn->help_cout(s, &o);
3973 }
3974
3975 /* Free any temporaries created by the helpers. */
3976 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
3977 tcg_temp_free_i64(o.out);
3978 }
3979 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
3980 tcg_temp_free_i64(o.out2);
3981 }
3982 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
3983 tcg_temp_free_i64(o.in1);
3984 }
3985 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
3986 tcg_temp_free_i64(o.in2);
3987 }
3988 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
3989 tcg_temp_free_i64(o.addr1);
3990 }
3991
3992 /* Advance to the next instruction. */
3993 s->pc = s->next_pc;
3994 return ret;
3995 }
3996
3997 static inline void gen_intermediate_code_internal(CPUS390XState *env,
3998 TranslationBlock *tb,
3999 int search_pc)
4000 {
4001 DisasContext dc;
4002 target_ulong pc_start;
4003 uint64_t next_page_start;
4004 uint16_t *gen_opc_end;
4005 int j, lj = -1;
4006 int num_insns, max_insns;
4007 CPUBreakpoint *bp;
4008 ExitStatus status;
4009 bool do_debug;
4010
4011 pc_start = tb->pc;
4012
4013 /* 31-bit mode */
4014 if (!(tb->flags & FLAG_MASK_64)) {
4015 pc_start &= 0x7fffffff;
4016 }
4017
4018 dc.tb = tb;
4019 dc.pc = pc_start;
4020 dc.cc_op = CC_OP_DYNAMIC;
4021 do_debug = dc.singlestep_enabled = env->singlestep_enabled;
4022
4023 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4024
4025 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4026
4027 num_insns = 0;
4028 max_insns = tb->cflags & CF_COUNT_MASK;
4029 if (max_insns == 0) {
4030 max_insns = CF_COUNT_MASK;
4031 }
4032
4033 gen_icount_start();
4034
4035 do {
4036 if (search_pc) {
4037 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4038 if (lj < j) {
4039 lj++;
4040 while (lj < j) {
4041 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4042 }
4043 }
4044 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4045 gen_opc_cc_op[lj] = dc.cc_op;
4046 tcg_ctx.gen_opc_instr_start[lj] = 1;
4047 tcg_ctx.gen_opc_icount[lj] = num_insns;
4048 }
4049 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4050 gen_io_start();
4051 }
4052
4053 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4054 tcg_gen_debug_insn_start(dc.pc);
4055 }
4056
4057 status = NO_EXIT;
4058 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
4059 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4060 if (bp->pc == dc.pc) {
4061 status = EXIT_PC_STALE;
4062 do_debug = true;
4063 break;
4064 }
4065 }
4066 }
4067 if (status == NO_EXIT) {
4068 status = translate_one(env, &dc);
4069 }
4070
4071 /* If we reach a page boundary, are single stepping,
4072 or exhaust instruction count, stop generation. */
4073 if (status == NO_EXIT
4074 && (dc.pc >= next_page_start
4075 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4076 || num_insns >= max_insns
4077 || singlestep
4078 || env->singlestep_enabled)) {
4079 status = EXIT_PC_STALE;
4080 }
4081 } while (status == NO_EXIT);
4082
4083 if (tb->cflags & CF_LAST_IO) {
4084 gen_io_end();
4085 }
4086
4087 switch (status) {
4088 case EXIT_GOTO_TB:
4089 case EXIT_NORETURN:
4090 break;
4091 case EXIT_PC_STALE:
4092 update_psw_addr(&dc);
4093 /* FALLTHRU */
4094 case EXIT_PC_UPDATED:
4095 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4096 cc op type is in env */
4097 update_cc_op(&dc);
4098 /* Exit the TB, either by raising a debug exception or by return. */
4099 if (do_debug) {
4100 gen_exception(EXCP_DEBUG);
4101 } else {
4102 tcg_gen_exit_tb(0);
4103 }
4104 break;
4105 default:
4106 abort();
4107 }
4108
4109 gen_icount_end(tb, num_insns);
4110 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4111 if (search_pc) {
4112 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4113 lj++;
4114 while (lj <= j) {
4115 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4116 }
4117 } else {
4118 tb->size = dc.pc - pc_start;
4119 tb->icount = num_insns;
4120 }
4121
4122 #if defined(S390X_DEBUG_DISAS)
4123 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4124 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4125 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4126 qemu_log("\n");
4127 }
4128 #endif
4129 }
4130
4131 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4132 {
4133 gen_intermediate_code_internal(env, tb, 0);
4134 }
4135
4136 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4137 {
4138 gen_intermediate_code_internal(env, tb, 1);
4139 }
4140
4141 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4142 {
4143 int cc_op;
4144 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4145 cc_op = gen_opc_cc_op[pc_pos];
4146 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4147 env->cc_op = cc_op;
4148 }
4149 }