]> git.proxmox.com Git - qemu.git/blob - target-s390x/translate.c
target-s390: Optimize ADDU/SUBU CC testing
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
51 struct DisasContext {
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
54 DisasFields *fields;
55 uint64_t pc, next_pc;
56 enum cc_op cc_op;
57 bool singlestep_enabled;
58 };
59
60 /* Information carried about a condition to be evaluated. */
61 typedef struct {
62 TCGCond cond:8;
63 bool is_64;
64 bool g1;
65 bool g2;
66 union {
67 struct { TCGv_i64 a, b; } s64;
68 struct { TCGv_i32 a, b; } s32;
69 } u;
70 } DisasCompare;
71
72 #define DISAS_EXCP 4
73
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
77 #endif
78
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
80 {
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
84 }
85 }
86 return pc;
87 }
88
89 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
90 int flags)
91 {
92 int i;
93
94 if (env->cc_op > 3) {
95 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
96 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
97 } else {
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
99 env->psw.mask, env->psw.addr, env->cc_op);
100 }
101
102 for (i = 0; i < 16; i++) {
103 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
104 if ((i % 4) == 3) {
105 cpu_fprintf(f, "\n");
106 } else {
107 cpu_fprintf(f, " ");
108 }
109 }
110
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
113 if ((i % 4) == 3) {
114 cpu_fprintf(f, "\n");
115 } else {
116 cpu_fprintf(f, " ");
117 }
118 }
119
120 #ifndef CONFIG_USER_ONLY
121 for (i = 0; i < 16; i++) {
122 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
123 if ((i % 4) == 3) {
124 cpu_fprintf(f, "\n");
125 } else {
126 cpu_fprintf(f, " ");
127 }
128 }
129 #endif
130
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i = 0; i < CC_OP_MAX; i++) {
133 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
134 inline_branch_miss[i], inline_branch_hit[i]);
135 }
136 #endif
137
138 cpu_fprintf(f, "\n");
139 }
140
/* TCG globals backed by fields of CPUS390XState (allocated in
   s390x_translate_init below).  */

/* The two halves of the program-status word.  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* Condition-code state: the symbolic operation plus its operands.  */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Names "r0".."r15" then "f0".."f15"; each fits in 4 bytes.  */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* Per-opcode cc_op values; presumably used to restore cc state on
   retranslation -- the consumer is not visible in this chunk.  */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
154
/* One-time initialization: create the fixed TCG globals declared above,
   each mapped onto its CPUS390XState field, and register the helpers.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* General registers r0-r15.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* Floating point registers f0-f15; names stored after the GPR names.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
194
195 static TCGv_i64 load_reg(int reg)
196 {
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
199 return r;
200 }
201
202 static TCGv_i64 load_freg32_i64(int reg)
203 {
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
206 return r;
207 }
208
/* Store V into GPR REG (all 64 bits).  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into FPR REG (all 64 bits).  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of GPR REG.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store a 32-bit float value; it lives in the high half of the FPR.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low half of a 128-bit helper result from env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
239
/* Write the current translation PC into the architectural psw.addr.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Materialize s->cc_op into the cc_op global.  DYNAMIC means the global
   already tracks it; STATIC means the global already holds the value.  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Sync psw.addr and cc state so a fault raised by a subsequent helper
   observes a consistent CPU state.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
258
/* Fetch 2 bytes of instruction text at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch 4 bytes of instruction text at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch a 6-byte instruction: the first halfword in bits 47:32 and the
   remaining 4 bytes in the low 32 bits.  */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
273
274 static int get_mem_index(DisasContext *s)
275 {
276 switch (s->tb->flags & FLAG_MASK_ASC) {
277 case PSW_ASC_PRIMARY >> 32:
278 return 0;
279 case PSW_ASC_SECONDARY >> 32:
280 return 1;
281 case PSW_ASC_HOME >> 32:
282 return 2;
283 default:
284 tcg_abort();
285 break;
286 }
287 }
288
289 static void gen_exception(int excp)
290 {
291 TCGv_i32 tmp = tcg_const_i32(excp);
292 gen_helper_exception(cpu_env, tmp);
293 tcg_temp_free_i32(tmp);
294 }
295
296 static void gen_program_exception(DisasContext *s, int code)
297 {
298 TCGv_i32 tmp;
299
300 /* Remember what pgm exeption this was. */
301 tmp = tcg_const_i32(code);
302 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
303 tcg_temp_free_i32(tmp);
304
305 tmp = tcg_const_i32(s->next_pc - s->pc);
306 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
307 tcg_temp_free_i32(tmp);
308
309 /* Advance past instruction. */
310 s->pc = s->next_pc;
311 update_psw_addr(s);
312
313 /* Save off cc. */
314 update_cc_op(s);
315
316 /* Trigger exception. */
317 gen_exception(EXCP_PGM);
318 }
319
/* Raise a specification exception for an illegal/unsupported opcode.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

/* Raise a privileged-operation exception when running in problem
   (user) state, as indicated by the PSTATE bit in the TB flags.  */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
331
332 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
333 {
334 TCGv_i64 tmp;
335
336 /* 31-bitify the immediate part; register contents are dealt with below */
337 if (!(s->tb->flags & FLAG_MASK_64)) {
338 d2 &= 0x7fffffffUL;
339 }
340
341 if (x2) {
342 if (d2) {
343 tmp = tcg_const_i64(d2);
344 tcg_gen_add_i64(tmp, tmp, regs[x2]);
345 } else {
346 tmp = load_reg(x2);
347 }
348 if (b2) {
349 tcg_gen_add_i64(tmp, tmp, regs[b2]);
350 }
351 } else if (b2) {
352 if (d2) {
353 tmp = tcg_const_i64(d2);
354 tcg_gen_add_i64(tmp, tmp, regs[b2]);
355 } else {
356 tmp = load_reg(b2);
357 }
358 } else {
359 tmp = tcg_const_i64(d2);
360 }
361
362 /* 31-bit mode mask if there are values loaded from registers */
363 if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
364 tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
365 }
366
367 return tmp;
368 }
369
370 static inline bool live_cc_data(DisasContext *s)
371 {
372 return (s->cc_op != CC_OP_DYNAMIC
373 && s->cc_op != CC_OP_STATIC
374 && s->cc_op > 3);
375 }
376
/* Set the cc to the constant VAL (0..3).  Any operands held by a
   previous live cc_op are discarded so TCG can drop dead stores.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Record a one-operand cc computation OP with operand DST.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Record a two-operand cc computation OP with operands SRC, DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Record a three-operand cc computation OP with operands SRC, DST, VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* cc := "is VAL nonzero" (64-bit logical result).  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc := nonzero test of a 32-bit float result.  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc := nonzero test of a 64-bit float result.  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc := nonzero test of a 128-bit float result (high/low halves).  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
447
/* Calculate the numeric cc (0..3) from the symbolic s->cc_op and its
   operands, leaving the result in the cc_op global and switching the
   context to CC_OP_STATIC.  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);

    /* First pass: decide which auxiliary temporaries are needed.
       The 3-operand arithmetic ops need only the cc_op constant; all
       other helper-based ops also need a zero placeholder operand.
       Note the "default" label falls through into the arithmetic
       cases on purpose.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed; no temporaries either.  */
        break;
    }

    /* Second pass: compute the cc value into the cc_op global.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    /* Release whichever temporaries the first switch allocated.  */
    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
555
556 static int use_goto_tb(DisasContext *s, uint64_t dest)
557 {
558 /* NOTE: we handle the case where the TB spans two pages here */
559 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
560 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
561 && !s->singlestep_enabled
562 && !(s->tb->cflags & CF_LAST_IO));
563 }
564
/* Count a branch that could not be inlined for cc_op (stats only;
   compiled out unless DEBUG_INLINE_BRANCHES is defined).  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Count a branch that was successfully inlined for cc_op.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
578
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   Indexed by the 4-bit branch mask; the low mask bit (CC=3) is a
   don't-care, hence the entries come in identical pairs.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.
   Bits for CC=2 and CC=3 are don't-cares, so entries repeat in fours.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
604
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  MASK is the 4-bit
   branch mask (bit 3 selects CC=0 ... bit 0 selects CC=3).  Where the
   symbolic cc_op allows it, the branch is turned into a direct
   comparison on the cc operands; otherwise the cc value is computed
   and compared against the mask.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        /* Unconditionally taken or never taken: the operands are
           irrelevant, but point both at a global so that free_compare
           does not try to free them.  */
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table as the signed compares, with unsigned conditions.  */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: only the all-zero / not-all-zero masks
           can be inlined.  */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        /* FLOGR produces only CC=0 or CC=2; mask bits 2 and 0 are
           don't-cares.  */
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        /* Logical add: cc pairs encode (result zero?) x (carry?).  */
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        /* Compare cc_dst (a global, so g1) against zero.  */
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        /* Both operands are globals; nothing to free later.  */
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (value & mask) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        /* EQ/NE test the result against zero; GEU/LTU test the
           result against the (unmodified) second operand for carry.  */
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already in the cc_op global; pick the
           cheapest comparison that realizes the mask.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
895
896 static void free_compare(DisasCompare *c)
897 {
898 if (!c->g1) {
899 if (c->is_64) {
900 tcg_temp_free_i64(c->u.s64.a);
901 } else {
902 tcg_temp_free_i32(c->u.s32.a);
903 }
904 }
905 if (!c->g2) {
906 if (c->is_64) {
907 tcg_temp_free_i64(c->u.s64.b);
908 } else {
909 tcg_temp_free_i32(c->u.s32.b);
910 }
911 }
912 }
913
/* ====================================================================== */
/* Define the insn format enumeration.  Each F<n> macro expands one
   format from insn-format.def to an FMT_* enumerator; the field
   arguments are ignored here and reused below for format_info.  */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
933
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* One enumerator per distinct field name across all formats.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact slots: fields that never co-occur share a slot number.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    unsigned op:8;              /* primary opcode byte */
    unsigned op2:8;             /* secondary opcode byte */
    unsigned presentC:16;       /* bitmap of occupied compact slots */
    unsigned int presentO;      /* bitmap of present original fields */
    int c[NUM_C_FIELD];         /* decoded field values, by compact slot */
};
1001
/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Test the original-index presence bitmap for field C.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch field value from its compact slot; asserts the field was
   actually decoded for this insn.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1017
/* Describe the layout of each field in each format.  BEG/SIZE give the
   bit position within the insn; TYPE selects the extraction rule
   (consumed by the decoder, outside this chunk).  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

/* Shorthand constructors for the common field kinds.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* Reuse insn-format.def, this time expanding each format into its
   field-layout table entry.  */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1071
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    /* The g_* flags mark operands that alias TCG globals and thus
       must not be freed.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r1_f128    4
#define SPEC_r2_f128    8

/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* Facility bits an insn may require; checked against the CPU model.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;

/* One decode-table entry: opcode, format, required facility, operand
   constraints, and the pipeline of helper callbacks that implement it.  */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:6;
    DisasFacility fac:6;
    unsigned spec:4;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
1150
1151 /* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */
1153
1154 static void help_l2_shift(DisasContext *s, DisasFields *f,
1155 DisasOps *o, int mask)
1156 {
1157 int b2 = get_field(f, b2);
1158 int d2 = get_field(f, d2);
1159
1160 if (b2 == 0) {
1161 o->in2 = tcg_const_i64(d2 & mask);
1162 } else {
1163 o->in2 = get_address(s, 0, b2, d2);
1164 tcg_gen_andi_i64(o->in2, o->in2, mask);
1165 }
1166 }
1167
1168 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1169 {
1170 if (dest == s->next_pc) {
1171 return NO_EXIT;
1172 }
1173 if (use_goto_tb(s, dest)) {
1174 update_cc_op(s);
1175 tcg_gen_goto_tb(0);
1176 tcg_gen_movi_i64(psw_addr, dest);
1177 tcg_gen_exit_tb((tcg_target_long)s->tb);
1178 return EXIT_GOTO_TB;
1179 } else {
1180 tcg_gen_movi_i64(psw_addr, dest);
1181 return EXIT_PC_UPDATED;
1182 }
1183 }
1184
/* Emit a (possibly conditional) branch.  C describes the condition; if
   IS_IMM the target is PC-relative (s->pc + 2 * IMM), otherwise it is the
   computed address CDEST.  Always consumes C (via free_compare).  Three
   code shapes are used depending on whether each exit can use goto_tb.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison to 64 bits for the movcond.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1309
1310 /* ====================================================================== */
1311 /* The operations. These perform the bulk of the work for any insn,
1312 usually after the operands have been loaded and output initialized. */
1313
/* Absolute value: out = |in2| (via helper).  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Float32 absolute value: clear the sign bit of the 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Float64 absolute value: clear the sign bit.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* Float128 absolute value: the sign bit is in the high doubleword (in1);
   the low doubleword (in2) is copied unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* Integer add: out = in1 + in2.  Any CC update is done by the cout hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1344
/* Add with carry: out = in1 + in2 + carry-in, where the incoming carry
   is bit 1 of the current condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    /* Materialize the CC left by the previous insn, then extract the
       carry from bit 1 (CC values 2 and 3 indicate carry-out).  */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    tcg_gen_shri_i64(cc, cc, 1);

    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
1361
/* Float32 add (helper handles rounding and FP exceptions).  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float64 add.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float128 add; the low half of the 128-bit result comes back via
   return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Bitwise AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1386
/* AND immediate into a sub-field of the register.  s->insn->data packs
   the field's bit offset (low byte) and width (next byte).  Bits outside
   the field are preserved by OR-ing the shifted immediate with ~mask.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1403
/* Branch and save (register form): store the link info, then branch to
   the address in in2 unless the branch register was r0 (no branch).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* Branch and save (immediate form): link, then jump PC-relative.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1420
/* Branch on condition: test the CC against mask M1, then branch either
   PC-relative (immediate form) or to the address in in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1431
/* Branch on count (32-bit): decrement the low 32 bits of r1 and branch
   if the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits, then write back only the low 32.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1455
/* Branch on count (64-bit): decrement r1 in place and branch if the
   result is non-zero.  The comparison uses the global register directly
   (c.g1 = true), so help_branch must not free it.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1474
/* Branch on index (32-bit): r1 += r3, then compare the 32-bit sum against
   the odd register of the r3 pair.  s->insn->data selects the comparison
   sense (LE vs GT variant of the instruction).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    /* The comparand is the odd register of the r3 even/odd pair.  */
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1500
/* Branch on index (64-bit): r1 += r3, compare against the odd register of
   the r3 pair.  If r1 aliases that comparand, its value must be copied
   before the addition clobbers it.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* r1 is the comparand: snapshot it before the add below.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1526
/* Compare and branch: compare in1 against in2 using the relation encoded
   in M3 (signed, or unsigned when s->insn->data is set), branching either
   PC-relative (i4 present) or to a computed address.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Register form: the branch target address goes in o->out.  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1552
/* Float32 compare; the helper returns the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Float64 compare.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Float128 compare (out/out2 and in1/in2 are the two 128-bit halves).  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1573
/* The following wrappers implement the float <-> integer conversions.
   In all of them M3 carries the rounding mode for the helper; the
   float-to-integer forms also set the CC from the float source.  */

/* Convert float32 to signed 32-bit.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert float64 to signed 32-bit.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert float128 (in1:in2) to signed 32-bit.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert float32 to signed 64-bit.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert float64 to signed 64-bit.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert float128 to signed 64-bit.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert float32 to unsigned 32-bit.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert float64 to unsigned 32-bit.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert float128 to unsigned 32-bit.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert float32 to unsigned 64-bit.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert float64 to unsigned 64-bit.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert float128 to unsigned 64-bit.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert signed 64-bit to float32.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert signed 64-bit to float64.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert signed 64-bit to float128 (low half via return_low128).  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Convert unsigned 64-bit to float32.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert unsigned 64-bit to float64.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert unsigned 64-bit to float128 (low half via return_low128).  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1731
/* Checksum: the helper computes the checksum and returns the number of
   bytes consumed; the r2/r2+1 pair (address, length) is then advanced by
   that amount.  The checksum itself comes back via return_low128.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the operand address and decrement the remaining length.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1748
/* Compare logical character: memory vs memory compare of L+1 bytes.
   Lengths of 1, 2, 4 or 8 bytes are inlined as two loads plus an
   unsigned compare; anything else goes through the helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);  /* encoded length minus one */
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: let the helper do the byte loop.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    /* Unsigned comparison of the two loaded values sets the CC.  */
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1782
/* Compare logical long extended: fully handled by the helper, which
   also updates the r1/r3 register pairs.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare logical under mask: compare the bytes of in1 selected by M3
   against successive bytes at address in2.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Compare logical string; the helper returns updated addresses for both
   operands (second one via return_low128).  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1816
/* Copy sign: out = magnitude of in2 with the sign bit of in1.  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
1826
/* Compare and swap (32-bit): helper compares in1 with memory at in2 and
   conditionally stores r3, setting the CC.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare and swap (64-bit).  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
1844
1845 #ifndef CONFIG_USER_ONLY
/* Compare and swap and purge (privileged); helper sets the CC.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
1855 #endif
1856
/* Compare double and swap (32-bit pair): pack the r3/r3+1 register pair
   into one 64-bit value and reuse the 64-bit compare-and-swap helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare double and swap (64-bit pair, 128-bit memory operand).  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
1879
/* Convert to decimal: helper converts the 32-bit value of in1 to a
   packed-decimal doubleword, which is stored at address in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1891
/* Compare and trap: if the comparison encoded in M3 holds, raise a data
   exception with DXC 0xff; otherwise fall through.  The branch condition
   is inverted so the straight-line path is the no-trap case.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap.  */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1918
1919 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged hypercall interface).  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want.  */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
1933 #endif
1934
/* Integer divisions.  The helpers return quotient and remainder; one of
   them comes back through return_low128.  */

/* Signed 32-bit divide.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Unsigned 32-bit divide.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Signed 64-bit divide.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Unsigned 128/64 divide: the dividend is the out:out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Float32 divide.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float64 divide.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float128 divide (low half via return_low128).  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1981
/* Extract access register r2 into out.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* Extract the floating-point control register into out.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
1994
/* EXECUTE: run the target instruction at address in2, modified by the
   low byte of in1, entirely inside a helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    /* The helper may fault or branch, so the PC and CC state must be
       in sync before the call.  */
    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2020
/* Find leftmost one: R1 gets the bit number of the first set bit (64 if
   none), R1+1 gets the input with that bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2040
2041 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2042 {
2043 int m3 = get_field(s->fields, m3);
2044 int pos, len, base = s->insn->data;
2045 TCGv_i64 tmp = tcg_temp_new_i64();
2046 uint64_t ccm;
2047
2048 switch (m3) {
2049 case 0xf:
2050 /* Effectively a 32-bit load. */
2051 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2052 len = 32;
2053 goto one_insert;
2054
2055 case 0xc:
2056 case 0x6:
2057 case 0x3:
2058 /* Effectively a 16-bit load. */
2059 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2060 len = 16;
2061 goto one_insert;
2062
2063 case 0x8:
2064 case 0x4:
2065 case 0x2:
2066 case 0x1:
2067 /* Effectively an 8-bit load. */
2068 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2069 len = 8;
2070 goto one_insert;
2071
2072 one_insert:
2073 pos = base + ctz32(m3) * 8;
2074 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2075 ccm = ((1ull << len) - 1) << pos;
2076 break;
2077
2078 default:
2079 /* This is going to be a sequence of loads and inserts. */
2080 pos = base + 32 - 8;
2081 ccm = 0;
2082 while (m3) {
2083 if (m3 & 0x8) {
2084 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2085 tcg_gen_addi_i64(o->in2, o->in2, 1);
2086 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2087 ccm |= 0xff << pos;
2088 }
2089 m3 = (m3 << 1) & 0xf;
2090 pos -= 8;
2091 }
2092 break;
2093 }
2094
2095 tcg_gen_movi_i64(tmp, ccm);
2096 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2097 tcg_temp_free_i64(tmp);
2098 return NO_EXIT;
2099 }
2100
/* Insert immediate: deposit in2 into in1 at the bit offset/size packed
   in s->insn->data (offset in the low byte, size above it).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2108
/* Insert program mask: build bits 24..31 of out from the PSW program
   mask (from psw_mask) and the condition code, leaving the rest of out
   unchanged.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program-mask bits from psw_mask into bits 24..27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Place the condition code in bits 28..29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2127
2128 #ifndef CONFIG_USER_ONLY
/* Invalidate page table entry (privileged).  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Insert storage key extended (privileged).  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2142 #endif
2143
/* Lengthen float32 to float64.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Round float64 to float32.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Round float128 (in1:in2) to float64.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Round float128 to float32.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Lengthen float64 to float128 (low half via return_low128).  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Lengthen float32 to float128 (low half via return_low128).  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Load 31 bits: mask in2 down to a 31-bit address value.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

/* Memory loads of various widths and signedness, address in in2.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2229
/* Load on condition: out = (condition M3 holds) ? in2 : in1, emitted as
   a movcond so no branch is generated.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize it as a 0/1 value, widen, and
           select on "!= 0".  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2259
2260 #ifndef CONFIG_USER_ONLY
/* Load control registers r1..r3 from memory (32-bit form, privileged).  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* Load control registers r1..r3 from memory (64-bit form, privileged).  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* Load real address (privileged); helper sets the CC.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Load PSW from an 8-byte (ESA format) operand (privileged).  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* Load PSW extended from a 16-byte operand (privileged).  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2328 #endif
2329
/* Load access registers r1..r3 from memory at in2.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2340
2341 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2342 {
2343 int r1 = get_field(s->fields, r1);
2344 int r3 = get_field(s->fields, r3);
2345 TCGv_i64 t = tcg_temp_new_i64();
2346 TCGv_i64 t4 = tcg_const_i64(4);
2347
2348 while (1) {
2349 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2350 store_reg32_i64(r1, t);
2351 if (r1 == r3) {
2352 break;
2353 }
2354 tcg_gen_add_i64(o->in2, o->in2, t4);
2355 r1 = (r1 + 1) & 15;
2356 }
2357
2358 tcg_temp_free_i64(t);
2359 tcg_temp_free_i64(t4);
2360 return NO_EXIT;
2361 }
2362
2363 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2364 {
2365 int r1 = get_field(s->fields, r1);
2366 int r3 = get_field(s->fields, r3);
2367 TCGv_i64 t = tcg_temp_new_i64();
2368 TCGv_i64 t4 = tcg_const_i64(4);
2369
2370 while (1) {
2371 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2372 store_reg32h_i64(r1, t);
2373 if (r1 == r3) {
2374 break;
2375 }
2376 tcg_gen_add_i64(o->in2, o->in2, t4);
2377 r1 = (r1 + 1) & 15;
2378 }
2379
2380 tcg_temp_free_i64(t);
2381 tcg_temp_free_i64(t4);
2382 return NO_EXIT;
2383 }
2384
2385 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2386 {
2387 int r1 = get_field(s->fields, r1);
2388 int r3 = get_field(s->fields, r3);
2389 TCGv_i64 t8 = tcg_const_i64(8);
2390
2391 while (1) {
2392 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2393 if (r1 == r3) {
2394 break;
2395 }
2396 tcg_gen_add_i64(o->in2, o->in2, t8);
2397 r1 = (r1 + 1) & 15;
2398 }
2399
2400 tcg_temp_free_i64(t8);
2401 return NO_EXIT;
2402 }
2403
/* Generic move: steal in2 as the output value.  Ownership (the g_*
   "is a global, don't free" flag) moves with it, and in2 is cleared so
   the generic cleanup code does not free it twice.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2412
/* 128-bit move: steal the in1/in2 pair as the out/out2 pair, moving
   the ownership flags along and clearing the inputs so the generic
   cleanup does not double-free them.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2424
/* MVC: memory-to-memory move of L1+1 bytes, done in the mvc helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
2433
/* MVCL: move long via the mvcl helper, which operates on the register
   pairs named by r1/r2 and returns the condition code.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
2445
/* MVCLE: move long extended via helper; in2 carries the second-operand
   parameter, and the helper sets the condition code.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2457
2458 #ifndef CONFIG_USER_ONLY
/* MVCP: move to primary space.  Note the register number is encoded in
   the l1 field position of this SS format.  Privileged; helper sets CC.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2468
/* MVCS: move to secondary space; mirror image of op_mvcp above, with
   the register number likewise taken from the l1 field position.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2478 #endif
2479
/* MVPG: move page via helper; r0 supplies the operand-access controls.
   The helper sets the condition code.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2487
/* MVST: move string via helper; r0 holds the terminator byte.  The
   helper returns the updated r1 address directly into in1 and the
   updated r2 address through the low-128 mechanism into in2.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2496
/* Integer multiply: out = in1 * in2 (low 64 bits).  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2502
/* 64x64->128 multiply via helper; high half in out, low half fetched
   via return_low128 into out2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2509
/* MEEB: BFP multiply (short format) via helper.  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2515
/* MDEB: BFP multiply, short operands widening to long, via helper.  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2521
/* MDB: BFP multiply (long format) via helper.  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2527
/* MXB: BFP multiply (extended format).  The 128-bit first operand is
   passed as the out/out2 pair; the 128-bit result comes back in out
   plus the low-128 register.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2534
/* MXDB: BFP multiply, long operands widening to extended, via helper;
   128-bit result returned in out + low-128.  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2541
/* MAEB: BFP multiply-and-add (short); the third operand is FP register
   r3, loaded as a short float in an i64.  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
2549
/* MADB: BFP multiply-and-add (long); third operand is FP register r3,
   passed directly as the global fregs[r3].  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2556
/* MSEB: BFP multiply-and-subtract (short); mirrors op_maeb.  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
2564
/* MSDB: BFP multiply-and-subtract (long); mirrors op_madb.  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2571
/* Load negative (integer): out = -|in2|, via helper.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}
2577
/* Load negative, short FP: force the sign bit (bit 31 of the float
   held in the low half of the i64) on.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
2583
/* Load negative, long FP: force the sign bit (bit 63) on.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
2589
/* Load negative, extended FP: force the sign bit of the high
   doubleword on and copy the low doubleword through unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2596
/* NC: storage-to-storage AND of L1+1 bytes via helper; helper sets CC.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2606
/* Load complement (integer): out = -in2.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}
2612
/* Load complement, short FP: flip the sign bit (bit 31).  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
2618
/* Load complement, long FP: flip the sign bit (bit 63).  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
2624
/* Load complement, extended FP: flip the sign bit of the high
   doubleword; pass the low doubleword through unchanged.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2631
/* OC: storage-to-storage OR of L1+1 bytes via helper; helper sets CC.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2641
/* Bitwise OR: out = in1 | in2.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2647
/* OR immediate into one field of the register.  insn->data encodes the
   field as (size << 8) | shift; the immediate in in2 is moved into
   position before the OR.  Only the manipulated bits feed the CC.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must be a local temp.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2663
/* POPCNT via helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
2669
2670 #ifndef CONFIG_USER_ONLY
/* PTLB: purge TLB via helper.  Privileged.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2677 #endif
2678
/* RISBG/RISBHG/RISBLG: rotate R2 left by I5, then insert the selected
   bit range I3..I4 into R1, optionally zeroing the remaining bits.
   The high/low variants restrict the operation to one 32-bit half
   (PMASK) while preserving the other half.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;            /* Z bit: zero the unselected bits */
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        /* i3 > i4: the selected range wraps around the end of the word. */
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            /* For risbhg, shift positions into the high half.  */
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;                 /* signal: deposit path not usable */
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
2761
/* RNSBG/ROSBG/RXSBG: rotate R2 left by I5, then AND/OR/XOR the bit
   range I3..I4 into R1.  The T bit (I3 bit 0x80) makes it test-only:
   the result is discarded and only the CC is produced.  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        /* i3 > i4: the selected range wraps around the end of the word. */
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside MASK are neutralized (1 for AND, 0 for
       OR/XOR) so only the selected range of R1 changes.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC from only the selected bits of the result. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2814
/* Byte-swap the low 16 bits of in2.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}
2820
/* Byte-swap the low 32 bits of in2.  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}
2826
/* Byte-swap all 64 bits of in2.  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2832
/* RLL: rotate the low 32 bits of in1 left by in2, done as a 32-bit
   rotate on truncated copies; result zero-extended into out.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}
2847
/* RLLG: 64-bit rotate left.  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2853
2854 #ifndef CONFIG_USER_ONLY
/* RRBE: reset reference bit extended via helper; helper sets the CC.
   Privileged.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2862
/* SACF: set address space control via helper.  Privileged.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
2870 #endif
2871
/* SAR: store the low 32 bits of in2 into access register r1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
2878
/* SEB: BFP subtract (short format) via helper.  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2884
/* SDB: BFP subtract (long format) via helper.  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2890
/* SXB: BFP subtract (extended format); 128-bit operands passed as
   pairs, result in out + low-128.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2897
/* SQEB: BFP square root (short format) via helper.  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2903
/* SQDB: BFP square root (long format) via helper.  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2909
/* SQXB: BFP square root (extended format); result in out + low-128.  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2916
2917 #ifndef CONFIG_USER_ONLY
/* SERVC: service call (SCLP) via helper; helper sets CC.  Privileged.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}
2926
/* SIGP: signal processor via helper.  Privileged.  NOTE(review): the
   helper writes cc_op but no set_cc_static() follows — verify against
   the helper's CC contract.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2936 #endif
2937
/* STOC/STOCG: store on condition.  Branch over the store when the
   condition from m3 is true (disas_jcc yields the branch-to-skip
   sense); insn->data selects 64- vs 32-bit store.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    int lab, r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
2966
/* SLA/SLAG: shift left arithmetic.  insn->data is the sign-bit index
   (31 or 63) and selects the CC computation width.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    /* Record the pre-shift operands for the deferred CC computation.  */
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
2980
/* Shift left logical.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2986
/* Shift right arithmetic.  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2992
/* Shift right logical.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2998
/* SFPC: set the floating-point control register via helper.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
3004
/* SFASR: set the FPC and signal, via helper.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3010
3011 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3012 {
3013 int b2 = get_field(s->fields, b2);
3014 int d2 = get_field(s->fields, d2);
3015 TCGv_i64 t1 = tcg_temp_new_i64();
3016 TCGv_i64 t2 = tcg_temp_new_i64();
3017 int mask, pos, len;
3018
3019 switch (s->fields->op2) {
3020 case 0x99: /* SRNM */
3021 pos = 0, len = 2;
3022 break;
3023 case 0xb8: /* SRNMB */
3024 pos = 0, len = 3;
3025 break;
3026 case 0xb9: /* SRNMT */
3027 pos = 4, len = 3;
3028 default:
3029 tcg_abort();
3030 }
3031 mask = (1 << len) - 1;
3032
3033 /* Insert the value into the appropriate field of the FPC. */
3034 if (b2 == 0) {
3035 tcg_gen_movi_i64(t1, d2 & mask);
3036 } else {
3037 tcg_gen_addi_i64(t1, regs[b2], d2);
3038 tcg_gen_andi_i64(t1, t1, mask);
3039 }
3040 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3041 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3042 tcg_temp_free_i64(t1);
3043
3044 /* Then install the new FPC to set the rounding mode in fpu_status. */
3045 gen_helper_sfpc(cpu_env, t2);
3046 tcg_temp_free_i64(t2);
3047 return NO_EXIT;
3048 }
3049
3050 #ifndef CONFIG_USER_ONLY
/* SPKA: set the PSW key from bits of the second-operand address,
   deposited directly into the PSW mask.  Privileged.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}
3058
/* SSKE: set storage key extended via helper.  Privileged.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
3065
/* SSM: replace the system-mask byte (bits 56..63 of the PSW mask, i.e.
   the top byte in PSW bit numbering) from in2.  Privileged.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3072
/* STAP: store CPU address — implemented here as a load of cpu_num into
   the output (the output hook performs the store).  Privileged.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3082
/* STCK: store clock; TOD value produced by helper, CC forced to 0.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3090
/* STCKE: store clock extended.  The 64-bit TOD from the helper is
   repositioned into the 16-byte extended format (two doubleword
   stores); a nonzero bit is planted in the low part so extended values
   compare differently from plain STCK values.  CC forced to 0.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3112
/* SCKC: set clock comparator via helper.  Privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}
3119
/* STCKC: store clock comparator via helper.  Privileged.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
3126
/* STCTG: store control registers R1..R3 (64-bit) via helper.
   Privileged; the helper stores may fault.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3138
/* STCTL: store control registers R1..R3 (32-bit) via helper.
   Privileged; the helper stores may fault.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3150
/* STIDP: store CPU id.  NOTE(review): this loads cpu_num, the same
   value op_stap uses — the architected CPU id word (version/machine
   type) looks unimplemented here; verify against the PoO.  Privileged.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3157
/* SPT: set CPU timer via helper.  Privileged.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
3164
/* STFL: store facility list — a hard-coded facility word (0xc0000000)
   is stored at the architected low-core location 200.  Privileged.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}
3178
/* STPT: store CPU timer via helper.  Privileged.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
3185
/* STSI: store system information via helper; r0/r1 carry the function
   code and selectors, the helper sets the CC.  Privileged.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
3194
/* SPX: set prefix register via helper.  Privileged.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
3201
/* Channel-subsystem instructions: not implemented; report CC 3
   ("not operational").  Privileged.  */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational. */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}
3209
/* STPX: store prefix — read env->psa and mask to the architected
   prefix bits.  Privileged.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
3217
/* STNSM/STOSM: store the current system-mask byte, then AND (op 0xac)
   or OR it with the immediate.  Privileged.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into the system-mask byte.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the system-mask byte.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3241
/* STURA: store using real address, via helper.  Privileged.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3249 #endif
3250
/* Store the low byte of in1 at the address in in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3256
/* Store the low halfword of in1 at the address in in2.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3262
/* Store the low word of in1 at the address in in2.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3268
/* Store the full doubleword in1 at the address in in2.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3274
/* STAM: store access registers R1..R3 at the address in in2 via
   helper (which may fault).  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3285
/* STCM/STCMH/STCMY: store the register bytes selected by mask m3 to
   successive byte locations.  insn->data is the bit position of the
   first candidate byte within the register.  Contiguous masks are
   special-cased as single 32/16/8-bit stores; other masks expand to a
   shift+store per selected byte.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the lowest selected byte within the register.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            /* Advance to the next mask bit / next-lower byte.  */
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3334
3335 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3336 {
3337 int r1 = get_field(s->fields, r1);
3338 int r3 = get_field(s->fields, r3);
3339 int size = s->insn->data;
3340 TCGv_i64 tsize = tcg_const_i64(size);
3341
3342 while (1) {
3343 if (size == 8) {
3344 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3345 } else {
3346 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3347 }
3348 if (r1 == r3) {
3349 break;
3350 }
3351 tcg_gen_add_i64(o->in2, o->in2, tsize);
3352 r1 = (r1 + 1) & 15;
3353 }
3354
3355 tcg_temp_free_i64(tsize);
3356 return NO_EXIT;
3357 }
3358
3359 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3360 {
3361 int r1 = get_field(s->fields, r1);
3362 int r3 = get_field(s->fields, r3);
3363 TCGv_i64 t = tcg_temp_new_i64();
3364 TCGv_i64 t4 = tcg_const_i64(4);
3365 TCGv_i64 t32 = tcg_const_i64(32);
3366
3367 while (1) {
3368 tcg_gen_shl_i64(t, regs[r1], t32);
3369 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3370 if (r1 == r3) {
3371 break;
3372 }
3373 tcg_gen_add_i64(o->in2, o->in2, t4);
3374 r1 = (r1 + 1) & 15;
3375 }
3376
3377 tcg_temp_free_i64(t);
3378 tcg_temp_free_i64(t4);
3379 tcg_temp_free_i64(t32);
3380 return NO_EXIT;
3381 }
3382
/* SRST: search string via helper; r0 holds the search character.  The
   helper returns one updated address directly into in1 and the other
   via the low-128 mechanism into in2, and sets the CC.  NOTE(review):
   the shl above `tcg_gen_shl_i64` pattern does not apply here; compare
   argument order with op_mvst when touching this.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3391
/* Integer subtract: out = in1 - in2.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3397
/* Subtract with borrow: out = in1 + ~in2 + carry, where carry is bit 1
   of the previous condition code (cc >= 2 means "no borrow", supplying
   the +1 that completes the two's complement).  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    /* in2 is complemented in place, so it must be a local temp.  */
    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3415
/* SVC: supervisor call.  Record the SVC code and instruction length in
   the env, then raise the SVC exception; translation stops here.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Exceptions need an accurate PSW address and CC.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3434
/* TCEB: test data class (short BFP) via helper; helper yields the CC.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3441
/* TCDB: test data class (long BFP) via helper; helper yields the CC.  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3448
/* TCXB: test data class (extended BFP); the 128-bit operand is the
   out/out2 pair.  Helper yields the CC.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3455
3456 #ifndef CONFIG_USER_ONLY
/* TPROT: test protection via helper; helper yields the CC.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3464 #endif
3465
/* TR: translate L1+1 bytes through the table at in2, via helper.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3475
/* UNPK: unpack decimal data via helper.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3484
/* XC: storage-to-storage XOR of L1+1 bytes.  The common idiom
   "XC x,x" (both operands identical) zeroes the field; for short
   lengths that case is inlined as a sequence of zero stores instead of
   calling the helper.  Otherwise the xc helper does the work and sets
   the CC.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;                            /* l is the length minus one */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* XOR with itself always yields zero: CC 0.  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
3538
/* EXCLUSIVE OR (register forms); CC handling is done by the cout
   generator selected in the insn table.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* EXCLUSIVE OR immediate to one aligned sub-field of the register.
   insn->data packs the field's bit offset (low byte) and width.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Move the immediate into position; in2 is a temp (asserted above),
       so it may be clobbered.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3560
/* Materialize a 64-bit zero in out; used for insns whose result is
   architecturally zero.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* As op_zero, but for a register pair: out2 aliases out, and g_out2 is
   set so translate_one does not double-free the shared temp.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3574
3575 /* ====================================================================== */
3576 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3577 the original inputs), update the various cc data structures in order to
3578 be able to compute the new condition code. */
3579
/* Each cout_* helper records a deferred CC computation: it stashes the
   relevant operands/result in the cc_* globals together with a CC_OP_*
   tag, and the CC is only materialized when something reads it.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed add: CC needs both inputs and the result to detect overflow.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* Unsigned add (logical): CC encodes zero/nonzero plus carry.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* Add with carry variants.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Signed and unsigned compares: only the two inputs are needed.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* Floating-point result classes (zero/negative/positive/NaN).  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Nonzero test of the low 32 bits only: truncate into cc_dst first so
   garbage in the high half of out cannot affect the CC.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Signed compare of the result against zero.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Subtract variants, mirroring the add family above.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* TEST UNDER MASK.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3735
3736 /* ====================================================================== */
3737 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3738 with the TCG register to which we will write. Used in combination with
3739 the "wout" generators, in some cases we need a new temporary, and in
3740 some cases we can write to a TCG global. */
3741
/* Allocate a fresh temp for the result.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Fresh temps for a result pair (out/out2).  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into GPR r1; g_out marks it as a global so
   translate_one's cleanup will not free it.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd GPR pair r1/r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into FPR r1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write directly into the 128-bit FP pair r1/r1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3786
3787 /* ====================================================================== */
3788 /* The "Write OUTput" generators. These generally perform some non-trivial
3789 copy of data to TCG globals, or to main memory. The trivial cases are
3790 generally handled by having a "prep" generator install the TCG global
3791 as the destination of the operation. */
3792
/* Store the full 64-bit result into GPR r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into GPR r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into GPR r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the low 32 bits of the result into GPR r1, preserving the
   high half.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store a 32-bit pair into the low halves of r1 and r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd pair: low 32 bits go to
   r1+1, high 32 bits to r1.  Note this clobbers o->out (shifted in
   place), which is fine since wout runs after the op.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store a short (32-bit) FP result into FPR r1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a long (64-bit) FP result into FPR r1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
3847
3848 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3849 {
3850 int f1 = get_field(s->fields, r1);
3851 store_freg(f1, o->out);
3852 store_freg(f1 + 2, o->out2);
3853 }
3854 #define SPEC_wout_x1 SPEC_r1_f128
3855
/* Conditional writeback: skip the store when r1 == r2 (the insn is
   then a pure compare/no-op on the destination).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* As above, for short FP registers.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Memory writebacks of the result to the first-operand address.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store to the second-operand address, which in2 holds here.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0
3901
3902 /* ====================================================================== */
3903 /* The "INput 1" generators. These load the first operand to an insn. */
3904
/* Load GPR r1 into a fresh temp.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Use GPR r1 in place ("_o" = original/global, no copy); g_in1 keeps
   the cleanup code from freeing it.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Sign-extend the low 32 bits of GPR r1.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Zero-extend the low 32 bits of GPR r1.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* The high 32 bits of GPR r1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd half of the even/odd pair rooted at r1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Form a 64-bit value from the 32-bit pair: r1 supplies the high half,
   r1+1 the low half.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even
3966
/* First operand taken from GPR r2 (copy).  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

/* First operand from GPR r3 (copy).  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

/* GPR r3 in place, no copy.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* Short FP operand from FPR r1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* Long FP operand, FPR r1 in place.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* 128-bit FP operand r1.  Deliberately placed in out/out2 rather than
   in1/in2: the 128-bit ops consume their input there (see op_tcxb,
   which reads o->out/o->out2).  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

/* Long FP operand, FPR r3 in place.  */
static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0
4028
/* Compute the first-operand effective address from b1/d1 (no index).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* First-operand address computed from the *second*-operand fields
   (x2/b2/d2) -- used by formats where the only address is encoded
   there.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Memory loads of the first operand, in the usual widths and
   extensions.  Each also leaves the address in o->addr1 for a possible
   writeback by a wout_m1_* helper.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4089
4090 /* ====================================================================== */
4091 /* The "INput 2" generators. These load the second operand to an insn. */
4092
/* Second operand = GPR r1 in place (no copy; g_in2 prevents freeing).  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Low 16 bits of GPR r1, zero-extended.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Low 32 bits of GPR r1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* GPR r2, copied.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* GPR r2 in place.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* GPR r2, but r2 == 0 means "no operand": in2 is left unset and the
   op must cope with an unused in2.  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Sub-width extensions of GPR r2.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0
4163
/* GPR r3, copied.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* 32-bit extensions of GPR r2.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* Short FP operand from FPR r2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Long FP operand, FPR r2 in place.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit FP operand r2: fills BOTH in1 (high) and in2 (low) with the
   register pair r2/r2+2, in place.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Address formed from GPR r2 alone (register-as-address, e.g. for
   branch-on-register forms); applies the addressing-mode masking done
   by get_address.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0
4211
/* Second-operand effective address from x2/b2/d2; x2 is optional in
   some formats.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: i2 is a signed halfword count from the insn
   address.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift amounts, masked to the width-appropriate maximum.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
4236
/* Memory loads of the second operand.  The address computed by
   in2_a2/in2_ri2 is reused as the destination temp (loaded over in
   place), so no extra temp is allocated.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* PC-relative memory loads (the "relative long" insn forms).  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
4306
/* Immediate operand as stored by extract_field (already sign-extended
   when the format says so).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* Immediate truncated/zero-extended to the named width.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* Unsigned immediate shifted left by insn->data bits, for the
   insert/or/and-immediate-high style insns.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4344
4345 /* ====================================================================== */
4346
/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

/* C() is D() with a zero data field.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: build the enum of insn indexes.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: build the DisasInsn descriptors, wiring up the
   in1/in2/prep/wout/cout/op helper functions and the accumulated
   specification-exception flags (SPEC_*).  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: the binary-search switch from (op << 8 | op2) to
   the descriptor.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4409
4410 /* Extract a field from the insn. The INSN should be left-aligned in
4411 the uint64_t so that we can more easily utilize the big-bit-endian
4412 definitions we extract from the Principals of Operation. */
4413
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-size field is the table's "not present" marker.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Classic sign-extension trick: xor with the sign bit then
           subtract it.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* Recombine: dh (high 8, signed) above dl (low 12).
           NOTE(review): `(int8_t)r << 12` left-shifts a possibly
           negative value -- formally UB in C, though it does what is
           intended on the compilers QEMU supports.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4449
4450 /* Lookup the insn at the current PC, extracting the operands into O and
4451 returning the info struct for the insn. Returns NULL for invalid insn. */
4452
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first halfword's top byte determines the insn length.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn in the 64-bit word so field begin/size
       offsets from the Principles of Operation apply directly.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in bits 12-15.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4538
/* Translate one target instruction: decode it, check specification
   exceptions, run the in/prep/op/wout/cout helper pipeline from the
   table entry, and free the temporaries.  Returns the exit status the
   op generator reported.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions: even register pairs and
       valid 128-bit FP register numbers (0,1,4,5,8,9,12,13 -- i.e. not
       greater than 13 with the pairing rules).  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; the table
       entry determines which are present.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       TCG globals (machine registers) that must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4642
/* Translate a block of guest code starting at tb->pc into TCG ops.
   If search_pc is nonzero, also record per-insn metadata (pc, cc_op,
   icount) into the gen_opc_* arrays so guest state can later be
   reconstructed from a host PC (see restore_state_to_opc). */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode: mask the start address down to 31 bits. */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* TBs never cross a guest page boundary. */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record pc/cc_op/icount for this insn; zero-fill any gap
               in the instr_start array left by multi-op insns. */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this pc ends the TB here so the debug
           exception fires before the insn executes. */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The TB epilogue was already emitted by the insn itself. */
        break;
    case EXIT_PC_STALE:
        /* psw.addr was not written by the generated code; do it now. */
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return. */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the tail of the instr_start array. */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4776
4777 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4778 {
4779 gen_intermediate_code_internal(env, tb, 0);
4780 }
4781
4782 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4783 {
4784 gen_intermediate_code_internal(env, tb, 1);
4785 }
4786
4787 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4788 {
4789 int cc_op;
4790 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4791 cc_op = gen_opc_cc_op[pc_pos];
4792 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4793 env->cc_op = cc_op;
4794 }
4795 }