]> git.proxmox.com Git - qemu.git/blob - target-s390x/translate.c
target-s390: Tidy comparisons
[qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
51 struct DisasContext {
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
54 DisasFields *fields;
55 uint64_t pc, next_pc;
56 enum cc_op cc_op;
57 bool singlestep_enabled;
58 };
59
60 /* Information carried about a condition to be evaluated. */
61 typedef struct {
62 TCGCond cond:8;
63 bool is_64;
64 bool g1;
65 bool g2;
66 union {
67 struct { TCGv_i64 a, b; } s64;
68 struct { TCGv_i32 a, b; } s32;
69 } u;
70 } DisasCompare;
71
72 #define DISAS_EXCP 4
73
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
77 #endif
78
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
80 {
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
84 }
85 }
86 return pc;
87 }
88
89 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
90 int flags)
91 {
92 int i;
93
94 if (env->cc_op > 3) {
95 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
96 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
97 } else {
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
99 env->psw.mask, env->psw.addr, env->cc_op);
100 }
101
102 for (i = 0; i < 16; i++) {
103 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
104 if ((i % 4) == 3) {
105 cpu_fprintf(f, "\n");
106 } else {
107 cpu_fprintf(f, " ");
108 }
109 }
110
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
113 if ((i % 4) == 3) {
114 cpu_fprintf(f, "\n");
115 } else {
116 cpu_fprintf(f, " ");
117 }
118 }
119
120 #ifndef CONFIG_USER_ONLY
121 for (i = 0; i < 16; i++) {
122 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
123 if ((i % 4) == 3) {
124 cpu_fprintf(f, "\n");
125 } else {
126 cpu_fprintf(f, " ");
127 }
128 }
129 #endif
130
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i = 0; i < CC_OP_MAX; i++) {
133 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
134 inline_branch_miss[i], inline_branch_hit[i]);
135 }
136 #endif
137
138 cpu_fprintf(f, "\n");
139 }
140
/* TCG globals mirroring the guest PSW (bound to env->psw in
   s390x_translate_init below).  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* TCG globals holding the deferred condition-code state: the operation
   kind plus up to three operands (see enum cc_op and gen_op_calc_cc).  */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Names ("r0".."r15" then "f0".."f15") and TCG globals for the 16
   general and 16 floating-point registers.  */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* Per-op cc_op values recorded during translation; presumably consulted
   when restoring state at an exception — confirm against the
   gen_intermediate_code/restore paths (not visible in this chunk).  */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
154
/* Allocate the TCG globals declared above, binding each to its slot in
   CPUS390XState, and register the generated helpers.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* GPR names occupy cpu_reg_names[0..15] ("r0".."r15").  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* FPR names occupy cpu_reg_names[16..31] ("f0".."f15").  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
194
195 static TCGv_i64 load_reg(int reg)
196 {
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
199 return r;
200 }
201
202 static TCGv_i64 load_freg32_i64(int reg)
203 {
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
206 return r;
207 }
208
/* Store all 64 bits of V into GPR REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
213
/* Store all 64 bits of V into FPR REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
218
/* Store the low 32 bits of V into the low half of GPR REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
224
/* Store the low 32 bits of V into the HIGH half of GPR REG,
   preserving the low half.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
229
/* Store the low 32 bits of V as the 32-bit value of FPR REG (kept in
   the high half of the 64-bit register); the low half is preserved.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
234
/* Copy the low 64 bits of a 128-bit helper result (stashed by the
   helper in env->retxl) into DEST.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
239
/* Write the current translation pc into the psw.addr global.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
245
/* Flush the translation-time cc operation into the cc_op global.
   Skipped for DYNAMIC (env already authoritative) and STATIC (cc_op
   already holds the final cc value).  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
252
/* Make env->psw.addr and the cc state consistent before emitting an
   operation that may fault (so exception unwinding sees valid state).  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
258
/* Fetch 2 bytes of instruction text at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
263
/* Fetch 4 bytes of instruction text at PC, zero-extended to 64 bits
   (the intermediate uint32_t cast avoids sign extension).  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
268
/* Fetch a 6-byte instruction at PC as a single 48-bit value in the low
   bits: first halfword in bits 47:32, remaining word in bits 31:0.  */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
273
/* Map the PSW address-space-control bits (kept in tb->flags, already
   shifted down by 32) to the MMU index used for memory accesses.  */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Access-register mode is not handled here.  */
        tcg_abort();
        break;
    }
}
288
289 static void gen_exception(int excp)
290 {
291 TCGv_i32 tmp = tcg_const_i32(excp);
292 gen_helper_exception(cpu_env, tmp);
293 tcg_temp_free_i32(tmp);
294 }
295
/* Emit code raising program exception CODE for the current insn: record
   the code and instruction length in env, advance the PSW past the
   insn, flush the cc state, and invoke the exception helper.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Remember the instruction length (2/4/6 bytes).  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}
319
320 static inline void gen_illegal_opcode(DisasContext *s)
321 {
322 gen_program_exception(s, PGM_SPECIFICATION);
323 }
324
/* Raise a privileged-operation program exception if the code being
   translated runs in problem state (PSW P bit; tb->flags holds the
   PSW mask bits shifted down by 32).  */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
331
/* Compute the effective address base(B2) + index(X2) + displacement D2
   into a new temporary.  Register number 0 means "no register" for both
   B2 and X2.  In 24/31-bit mode the result is truncated to 31 bits.  */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        /* Neither base nor index: address is the displacement alone.  */
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
369
370 static inline bool live_cc_data(DisasContext *s)
371 {
372 return (s->cc_op != CC_OP_DYNAMIC
373 && s->cc_op != CC_OP_STATIC
374 && s->cc_op > 3);
375 }
376
/* Set the condition code to the constant VAL (0..3), discarding any
   data held for a previously deferred cc computation.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
386
/* Defer a one-operand cc computation: record OP and its operand in
   cc_dst; stale src/vr data is discarded.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
396
/* Defer a two-operand cc computation: record OP with operands in
   cc_src/cc_dst; a stale vr value is discarded.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
407
/* Defer a three-operand cc computation: record OP with operands in
   cc_src/cc_dst/cc_vr (all three slots are overwritten).  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
416
/* cc = (VAL != 0), deferred as a CC_OP_NZ computation.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
421
/* Defer a float32 zero/sign classification of VAL into the cc.  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
426
/* Defer a float64 zero/sign classification of VAL into the cc.  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
431
/* Defer a float128 zero/sign classification (high/low halves VH/VL)
   into the cc.  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
436
/* CC value is in env->cc_op */
/* Declare that the cc_op global now holds the final 0..3 cc value;
   any deferred-computation operands become dead.  */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
447
/* calculates cc into cc_op */
/* Materialize a deferred condition code: evaluate the recorded cc
   operation with its operands (calling the calc_cc helper with 0, 1, 2
   or 3 real arguments as appropriate) and leave the 0..3 result in the
   cc_op global, then mark the state CC_OP_STATIC.  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First pass: decide which helper arguments need materializing.
       A zero "dummy" stands in for unused operand slots.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed (constant/static) or the op is already
           in the cc_op global (dynamic).  */
        break;
    }

    /* Second pass: emit the actual computation.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
555
/* Return true if a direct goto_tb to DEST is permissible: DEST must lie
   on one of the (up to two) pages the TB occupies, and neither
   single-stepping nor icount I/O handling may be active.  */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}
564
/* Statistics: count a branch whose cc could not be folded into an
   inline comparison.  No-op unless DEBUG_INLINE_BRANCHES is defined.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
571
/* Statistics: count a branch successfully folded into an inline
   comparison.  No-op unless DEBUG_INLINE_BRANCHES is defined.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
578
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   Indexed by the 4-bit branch mask (bit 3 = cc0/EQ, bit 2 = cc1/LT,
   bit 1 = cc2/GT); entries come in pairs because mask bit 0 (cc3) is
   irrelevant for comparisons.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
591
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.
   Indexed by the 4-bit branch mask; only bits 3 (cc0, "zero") and 2
   (cc1, "nonzero") matter, hence the groups of identical entries.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
604
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the deferred cc
   operation admits it, the 4-bit branch mask is folded into a direct
   comparison of the recorded operands; otherwise the cc is materialized
   and the mask is tested against it.  The caller must release C with
   free_compare().  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Unconditional or never-taken: compare the cc_op global with
       itself so no temporaries are needed.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            /* cc == 0: all selected bits zero.  */
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            /* cc != 0: some selected bit set.  */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Test (src & dst) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* The cc value (0..3) is in the cc_op global; translate the
           branch mask into a test on it.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
827
828 static void free_compare(DisasCompare *c)
829 {
830 if (!c->g1) {
831 if (c->is_64) {
832 tcg_temp_free_i64(c->u.s64.a);
833 } else {
834 tcg_temp_free_i32(c->u.s32.a);
835 }
836 }
837 if (!c->g2) {
838 if (c->is_64) {
839 tcg_temp_free_i64(c->u.s64.b);
840 } else {
841 tcg_temp_free_i32(c->u.s32.b);
842 }
843 }
844 }
845
/* ====================================================================== */
/* Define the insn format enumeration. */
/* Each F<n> macro maps a format definition (name plus n field
   descriptions) from insn-format.def to a FMT_<name> enumerator; the
   field arguments are ignored in this expansion.  */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
865
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* One distinct value per architectural field name; used as a bit
   position in DisasFields.presentO.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact storage slots: fields that never coexist in one instruction
   format share a slot in DisasFields.c[].  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

/* The decoded instruction: opcode bytes, availability bitmaps (presentC
   by compact slot, presentO by original index) and the field values.  */
struct DisasFields {
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
933
934 /* This is the way fields are to be accessed out of DisasFields. */
935 #define have_field(S, F) have_field1((S), FLD_O_##F)
936 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
937
938 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
939 {
940 return (f->presentO >> c) & 1;
941 }
942
/* Fetch the value of field O from F out of its compact slot C,
   asserting that the instruction format actually contains it.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
949
/* Describe the layout of each field in each format. */
/* beg/size give the bit position and width within the instruction;
   type selects the extraction kind (0 = unsigned, 1 = signed immediate,
   2 = 20-bit long displacement, per the macros below).  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

/* Field constructors: R = register, M = mask, BD = base+displacement,
   BXD = base+index+displacement, *L variants use the signed 20-bit
   long displacement, I = signed immediate, L = length.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* Indexed by DisasFormat; one entry per format in insn-format.def.  */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1003
/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    /* g_* flags mark the corresponding value as a TCG global that must
       not be freed by the generic cleanup code.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
1012
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>. */

/* These combine as a bitmask in DisasInsn.spec.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r1_f128    4
#define SPEC_r2_f128    8

/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
1041
/* Architectural facility an instruction belongs to; used to gate
   decoding on the emulated CPU's capabilities.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1064
/* Static description of one instruction: opcode, format, required
   facility, operand constraints (SPEC_* bits), and the callbacks that
   load operands, perform the operation, and write results back.  */
struct DisasInsn {
    unsigned opc:16;            /* (extended) opcode */
    DisasFormat fmt:6;          /* instruction format (FMT_*) */
    DisasFacility fac:6;        /* facility gating this insn */
    unsigned spec:4;            /* SPEC_* operand constraints */

    const char *name;

    /* Operand setup / result writeback helpers; any may be NULL.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    /* The operation proper; returns how the TB should continue.  */
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;              /* per-insn constant for shared helpers */
};
1082
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */
1085
/* Load a shift count into o->in2: the displacement D2 alone when no
   base register is given, otherwise r[B2] + D2; in both cases reduced
   by MASK to the architecturally significant bits.  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
1099
/* Unconditional direct branch to DEST.  Falls through (NO_EXIT) when
   DEST is simply the next sequential instruction; chains via goto_tb
   when permitted, otherwise just updates the PSW address.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1116
/* Emit a (possibly conditional) branch under condition C.  The target
   is either pc-relative (IS_IMM true: S->pc + 2 * IMM halfwords) or the
   register value CDEST.  Consumes C (calls free_compare).  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* movcond is only available for i64 here; widen the i32
               comparison result and select on it being non-zero.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1241
1242 /* ====================================================================== */
1243 /* The operations. These perform the bulk of the work for any insn,
1244 usually after the operands have been loaded and output initialized. */
1245
/* Absolute value: out = |in2|, via the 64-bit abs helper. */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}
1251
/* Float absolute value (32-bit): clear the sign bit (bit 31) of in2. */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
1257
/* Float absolute value (64-bit): clear the sign bit (bit 63) of in2. */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
1263
/* Float absolute value (128-bit): clear the sign bit in the high half
   (in1), copy the low half (in2) unchanged. */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
1270
/* Integer add: out = in1 + in2.  CC is handled by the insn table. */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1276
/* Add with carry: out = in1 + in2 + carry-in.  The incoming carry is
   recovered from the current condition code. */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Extract bit 1 of the CC; for the logical-add CC encodings a CC of
       2 or 3 indicates carry-out — NOTE(review): relies on that encoding. */
    tcg_gen_shri_i64(cc, cc, 1);

    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
1293
/* Add short BFP: out = in1 + in2 via helper (sets FP exceptions in env). */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1299
/* Add long BFP: out = in1 + in2 via helper. */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1305
/* Add extended (128-bit) BFP: (out:out2) += (in1:in2); the helper returns
   the high half, the low half comes back via return_low128. */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1312
/* Bitwise AND: out = in1 & in2. */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1318
/* AND immediate into a sub-field of in1.  insn->data packs the field:
   low byte = bit offset, high bits = field width.  Bits outside the
   field are preserved by OR-ing ~mask into the shifted immediate. */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1335
1336 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1337 {
1338 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1339 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1340 tcg_gen_mov_i64(psw_addr, o->in2);
1341 return EXIT_PC_UPDATED;
1342 } else {
1343 return NO_EXIT;
1344 }
1345 }
1346
/* Branch-and-save with immediate target: save the link info, then take a
   direct branch to pc + 2*i2 (halfword-scaled displacement). */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1352
/* Branch on condition: evaluate mask m1 into a DisasCompare and hand the
   immediate (relative) or register (in2) target to help_branch. */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1363
/* Branch on count (32-bit): decrement the low 32 bits of r1 and branch
   if the result is non-zero. */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write back only the low 32 bits of the decremented value.  */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1387
/* Branch on count (64-bit): decrement r1 in place and branch if non-zero.
   c.g1 marks the compare operand as a global (regs[r1]) so it is not
   freed by free_compare. */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1406
/* Branch on index (32-bit, BXH/BXLE family): r1 += r3, then compare the
   32-bit sum against the comparand in r3|1.  insn->data selects the
   branch-on-low-or-equal (LE) vs branch-on-high (GT) variant. */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    /* Read the comparand before writing back r1, in case r1 == r3|1.  */
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1432
/* Branch on index (64-bit): r1 += r3, compare against r3|1.  When
   r1 == r3|1 the comparand must be copied first, since the add below
   would clobber it. */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* Snapshot the comparand before it is overwritten by the add.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1458
/* Compare and jump (CRJ/CGRJ/CLRJ/... family): compare in1 with in2 using
   the condition selected by m3, branching to the i4 immediate or to the
   b4/d4 address.  insn->data non-zero selects unsigned comparison. */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    /* in1/in2 are owned by the caller; mark them global so free_compare
       leaves them alone.  */
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1484
/* Compare short BFP: helper sets the CC from in1 vs in2. */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1491
/* Compare long BFP: helper sets the CC from in1 vs in2. */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1498
/* Compare extended (128-bit) BFP: (out:out2) vs (in1:in2), CC from helper. */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1505
/* Convert short BFP to 32-bit signed int; m3 is the rounding mode passed
   to the helper.  CC is derived from the FP source value. */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1514
/* Convert long BFP to 32-bit signed int; m3 = rounding mode, CC from source. */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1523
/* Convert extended BFP (in1:in2) to 32-bit signed int; m3 = rounding mode. */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1532
/* Convert short BFP to 64-bit signed int; m3 = rounding mode, CC from source. */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1541
/* Convert long BFP to 64-bit signed int; m3 = rounding mode, CC from source. */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1550
/* Convert extended BFP (in1:in2) to 64-bit signed int; m3 = rounding mode. */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1559
/* Convert short BFP to 32-bit unsigned int; m3 = rounding mode. */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1568
/* Convert long BFP to 32-bit unsigned int; m3 = rounding mode. */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1577
/* Convert extended BFP (in1:in2) to 32-bit unsigned int; m3 = rounding mode. */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1586
/* Convert short BFP to 64-bit unsigned int; m3 = rounding mode. */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1595
/* Convert long BFP to 64-bit unsigned int; m3 = rounding mode. */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1604
/* Convert extended BFP (in1:in2) to 64-bit unsigned int; m3 = rounding mode. */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1613
/* Convert 64-bit signed int to short BFP; m3 = rounding mode.  No CC. */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1621
/* Convert 64-bit signed int to long BFP; m3 = rounding mode.  No CC. */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1629
/* Convert 64-bit signed int to extended BFP; the 128-bit result comes
   back in out (high) and out2 (low, via return_low128). */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1638
/* Convert 64-bit unsigned int to short BFP; m3 = rounding mode.  No CC. */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1646
/* Convert 64-bit unsigned int to long BFP; m3 = rounding mode.  No CC. */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1654
/* Convert 64-bit unsigned int to extended BFP; 128-bit result in out:out2. */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1663
/* CHECKSUM (CKSM): the helper accumulates the checksum over the buffer
   described by r2 (address) and r2+1 (length) and returns the number of
   bytes processed; the checksum itself comes back via return_low128.
   The operand registers are then advanced/decremented by that count. */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    /* return_low128 copies the helper's second result into o->out;
       it is not a C return statement.  */
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1680
/* COMPARE LOGICAL (CLC): for power-of-two operand lengths up to 8 bytes,
   inline the two loads and let the CC machinery do the unsigned compare;
   otherwise fall back to the byte-loop helper.  l1 is length minus one. */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: helper compares byte-by-byte and sets the CC.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1714
/* COMPARE LOGICAL LONG EXTENDED: entirely in the helper; CC is static. */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
1726
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the bytes of
   the low 32 bits of in1 selected by m3 against memory at in2. */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1739
/* COMPARE LOGICAL STRING: helper returns the updated first-operand
   address in in1 and the second via return_low128; regs[0] holds the
   terminating character. */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1748
/* COPY SIGN: out = sign(in1) combined with magnitude(in2).  The sign bit
   of in1 is isolated first in a temp, in case out aliases in1. */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
1758
/* COMPARE AND SWAP (32-bit): helper does the atomic compare/exchange of
   in1 with memory at in2, replacement value from r3; CC from helper. */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
1767
/* COMPARE AND SWAP (64-bit): as op_cs but via the 64-bit helper. */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
1776
1777 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): done in the helper, which
   also handles the TLB purge; CC from helper. */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
1787 #endif
1788
/* COMPARE DOUBLE AND SWAP (32-bit pair): build the 64-bit replacement
   value from the r3/r3+1 register pair and reuse the 64-bit CSG helper. */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    /* in3 = (r3 << 32) | r3+1, i.e. the even register is the high half.  */
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}
1800
/* COMPARE DOUBLE AND SWAP (64-bit pair, 128-bit operand): helper only. */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
1811
/* CONVERT TO DECIMAL: helper converts the low 32 bits of in1 to a packed
   decimal doubleword, which is stored at the address in in2. */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1823
/* COMPARE AND TRAP: if the (possibly unsigned) m3 condition holds for
   in1 vs in2, set DXC to 0xff and raise a data exception; otherwise
   branch past the trap.  Note the condition is inverted so the branch
   skips the trapping path. */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap. */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1850
1851 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): hypercall-style interface; the function code is
   taken from the displacement field, arguments from r2/r1. */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
1865 #endif
1866
/* Signed 32-bit divide: helper returns remainder in out2, quotient via
   return_low128 into out. */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
1873
/* Unsigned 32-bit divide: remainder in out2, quotient via return_low128. */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
1880
/* Signed 64-bit divide: remainder in out2, quotient via return_low128. */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
1887
/* Unsigned 128/64 divide: dividend is the out:out2 pair, divisor is in2;
   remainder in out2, quotient via return_low128. */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
1894
/* Divide short BFP: out = in1 / in2 via helper. */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1900
/* Divide long BFP: out = in1 / in2 via helper. */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1906
/* Divide extended (128-bit) BFP: (out:out2) /= (in1:in2). */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1913
/* EXTRACT ACCESS REGISTER: out = zero-extended access register r2. */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
1920
/* EXTRACT FPC: out = zero-extended floating-point control register. */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
1926
/* EXECUTE: run the target instruction at in2, modified by in1, via the
   helper; the CC it produces is folded back in.  PSW address and cc_op
   must be synced first since the helper may fault or take exceptions. */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further. */

    TCGv_i64 tmp;

    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
1952
/* FIND LEFTMOST ONE: R1 = number of leading zeros of in2 (64 when in2
   is zero); R1+1 = in2 with the found bit cleared.  CC from the input. */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation. */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing. */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
1972
1973 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
1974 {
1975 int m3 = get_field(s->fields, m3);
1976 int pos, len, base = s->insn->data;
1977 TCGv_i64 tmp = tcg_temp_new_i64();
1978 uint64_t ccm;
1979
1980 switch (m3) {
1981 case 0xf:
1982 /* Effectively a 32-bit load. */
1983 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
1984 len = 32;
1985 goto one_insert;
1986
1987 case 0xc:
1988 case 0x6:
1989 case 0x3:
1990 /* Effectively a 16-bit load. */
1991 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
1992 len = 16;
1993 goto one_insert;
1994
1995 case 0x8:
1996 case 0x4:
1997 case 0x2:
1998 case 0x1:
1999 /* Effectively an 8-bit load. */
2000 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2001 len = 8;
2002 goto one_insert;
2003
2004 one_insert:
2005 pos = base + ctz32(m3) * 8;
2006 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2007 ccm = ((1ull << len) - 1) << pos;
2008 break;
2009
2010 default:
2011 /* This is going to be a sequence of loads and inserts. */
2012 pos = base + 32 - 8;
2013 ccm = 0;
2014 while (m3) {
2015 if (m3 & 0x8) {
2016 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2017 tcg_gen_addi_i64(o->in2, o->in2, 1);
2018 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2019 ccm |= 0xff << pos;
2020 }
2021 m3 = (m3 << 1) & 0xf;
2022 pos -= 8;
2023 }
2024 break;
2025 }
2026
2027 tcg_gen_movi_i64(tmp, ccm);
2028 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2029 tcg_temp_free_i64(tmp);
2030 return NO_EXIT;
2031 }
2032
/* Insert immediate into a sub-field of in1.  insn->data packs the field:
   low byte = bit offset, high bits = field width. */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2040
/* INSERT PROGRAM MASK: build bits 24-31 of the output from the program
   mask (PSW mask bits, shifted into bits 24-27) and the CC (bits 28-29),
   preserving the other bytes of the output register. */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    t1 = tcg_temp_new_i64();
    /* Extract the program-mask bits from psw_mask into position 24.  */
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Insert the condition code at bit position 28.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2059
2060 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged): done entirely in the helper. */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2067
/* INSERT STORAGE KEY EXTENDED (privileged): out = storage key for in2. */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2074 #endif
2075
/* Lengthen short BFP to long BFP via helper. */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2081
/* Round long BFP to short BFP via helper. */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2087
/* Round extended BFP (in1:in2) to long BFP via helper. */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2093
/* Round extended BFP (in1:in2) to short BFP via helper. */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2099
/* Lengthen long BFP to extended BFP; 128-bit result in out:out2. */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2106
/* Lengthen short BFP to extended BFP; 128-bit result in out:out2. */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2113
/* LOAD LOGICAL 31 BITS: out = in2 & 0x7fffffff. */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2119
/* Load sign-extended 8-bit value from the address in in2. */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2125
/* Load zero-extended 8-bit value from the address in in2. */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2131
/* Load sign-extended 16-bit value from the address in in2. */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2137
/* Load zero-extended 16-bit value from the address in in2. */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2143
/* Load sign-extended 32-bit value from the address in in2. */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2149
/* Load zero-extended 32-bit value from the address in in2. */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2155
/* Load 64-bit value from the address in in2. */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2161
/* LOAD ON CONDITION: out = (m3 condition true) ? in2 : in1, implemented
   with movcond.  For 32-bit compares the condition is first materialized
   with setcond and extended, since movcond operates on i64 here. */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2191
2192 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged): load control registers r1..r3 from
   memory at in2 via the helper. */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2204
/* LOAD CONTROL (64-bit, privileged): as op_lctl with 64-bit values. */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS (privileged): translate the virtual address in in2;
   the helper sets the CC according to the translation result. */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2224
/* LOAD PSW (privileged): load a short-format (2 x 32-bit) PSW from the
   address in in2 and hand it to the load_psw helper; this ends the TB. */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2243
/* LOAD PSW EXTENDED (privileged): load a 16-byte PSW (mask + address)
   from in2 and install it via the helper; this ends the TB. */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2260 #endif
2261
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory at in2. */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2272
/* LOAD MULTIPLE (32-bit): load consecutive words from in2 into the low
   halves of r1..r3, wrapping the register number modulo 16.  The loop
   is unrolled at translation time; in2 is advanced in place. */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2294
/* LOAD MULTIPLE HIGH: like op_lm32, but the loaded words go into the
   high halves of the registers (store_reg32h_i64). */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2316
/* LOAD MULTIPLE (64-bit): load consecutive doublewords from in2 into
   registers r1..r3, wrapping the register number modulo 16. */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2335
/* Move in2 to out by transferring ownership of the temp: clearing
   in2 afterwards keeps the generic cleanup code from freeing the
   value we just handed to out.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* Move the 128-bit pair (in1,in2) to (out,out2), again by transferring
   ownership of the temps rather than copying.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2356
/* MOVE (character): copy l1+1 bytes from the second operand to the
   first; all the work is in the helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: register pairs r1/r2 describe the operands; the helper
   updates them and produces the condition code.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: like MVCL but with a pad byte from the address
   in in2; condition code comes from the helper.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2389
2390 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-space move; note the length is
   taken from the register designated by the l1 field.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: mirror image of MVCP, same operand layout.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2410 #endif
2411
/* MOVE PAGE: helper copies one page; r0 supplies the option bits.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: r0 holds the terminator byte.  The helper returns the
   updated first-operand address in o->in1 and the updated second
   operand via the low-128 return slot.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2428
/* MULTIPLY: low 64 bits of the product.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (128-bit result): helper returns the high half directly
   and the low half through the low-128 return slot.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP).  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short to long BFP).  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP).  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (extended BFP): 128-bit operands in out/out2 and in1/in2.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (long to extended BFP).  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3].  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (long BFP).  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (short BFP).  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (long BFP).  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2503
/* LOAD NEGATIVE (integer): out = -|in2|.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD NEGATIVE (short BFP): force the sign bit of the 32-bit value
   kept in the low half of the i64.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (long BFP): force the sign bit.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (extended BFP): sign lives in the high doubleword;
   the low doubleword is copied unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2528
/* AND (character): storage-to-storage AND of l1+1 bytes; the helper
   computes the condition code.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2538
/* LOAD COMPLEMENT (integer): out = -in2.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (short BFP): flip the sign bit of the 32-bit value
   kept in the low half of the i64.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (long BFP): flip the sign bit.  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (extended BFP): sign is in the high doubleword;
   the low doubleword is copied unchanged.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2563
/* OR (character): storage-to-storage OR of l1+1 bytes; the helper
   computes the condition code.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* OR (register/memory).  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2579
/* OR IMMEDIATE (OIHH etc.): insn->data packs the field width (high
   byte) and bit offset (low byte) of the halfword/word being ORed.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* Align the immediate with its target field before ORing.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2595
/* POPULATION COUNT: per-byte popcount done in the helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
/* PURGE TLB (privileged).  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
#endif
2609 #endif
2610
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate R2
   left by I5, then insert bits I3..I4 of the result into R1, either
   preserving (default) or zeroing (I4 bit 0x80) the uninserted bits.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  PMASK limits the
       operation to the whole register, the high half, or the low half.  */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
2693
/* ROTATE THEN <AND|OR|XOR> SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate
   R2 left by I5 and combine bits I3..I4 with R1; I3 bit 0x80 makes it
   a test-only form that sets the CC without writing R1.  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside MASK are forced to the operation's
       identity element so they leave R1 unchanged.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2746
/* Byte-swap the low 16 bits (LRVH and friends).  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap the low 32 bits.  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap all 64 bits.  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2764
2765 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2766 {
2767 TCGv_i32 t1 = tcg_temp_new_i32();
2768 TCGv_i32 t2 = tcg_temp_new_i32();
2769 TCGv_i32 to = tcg_temp_new_i32();
2770 tcg_gen_trunc_i64_i32(t1, o->in1);
2771 tcg_gen_trunc_i64_i32(t2, o->in2);
2772 tcg_gen_rotl_i32(to, t1, t2);
2773 tcg_gen_extu_i32_i64(o->out, to);
2774 tcg_temp_free_i32(t1);
2775 tcg_temp_free_i32(t2);
2776 tcg_temp_free_i32(to);
2777 return NO_EXIT;
2778 }
2779
/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2785
2786 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED (privileged): helper clears the storage
   key reference bit and yields the CC.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL FAST (privileged).  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
2802 #endif
2803
/* SET ACCESS REGISTER: store the low 32 bits of in2 into aregs[r1].  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
2810
/* SUBTRACT (short BFP).  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (long BFP).  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (extended BFP): 128-bit operands in out/out2 and in1/in2.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT (short BFP).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (long BFP).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (extended BFP).  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2848
2849 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (privileged): SCLP interface, all in the helper.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR (privileged).  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2868 #endif
2869
/* STORE ON CONDITION (STOC/STOCG): branch AROUND the store when the
   condition in m3 holds for the INVERTED sense produced by disas_jcc,
   so the store happens only when the condition is true.  insn->data
   selects a 64-bit (non-zero) or 32-bit store.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    int lab, r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    /* Compute the address here, rather than via in2, so that the
       address computation is skipped along with the store.  */
    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
2898
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 or 63), which also selects the CC computation.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
2912
/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2930
/* SET FPC: install a new floating-point control register.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: like SFPC but may raise simulated exceptions.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
2942
2943 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
2944 {
2945 int b2 = get_field(s->fields, b2);
2946 int d2 = get_field(s->fields, d2);
2947 TCGv_i64 t1 = tcg_temp_new_i64();
2948 TCGv_i64 t2 = tcg_temp_new_i64();
2949 int mask, pos, len;
2950
2951 switch (s->fields->op2) {
2952 case 0x99: /* SRNM */
2953 pos = 0, len = 2;
2954 break;
2955 case 0xb8: /* SRNMB */
2956 pos = 0, len = 3;
2957 break;
2958 case 0xb9: /* SRNMT */
2959 pos = 4, len = 3;
2960 default:
2961 tcg_abort();
2962 }
2963 mask = (1 << len) - 1;
2964
2965 /* Insert the value into the appropriate field of the FPC. */
2966 if (b2 == 0) {
2967 tcg_gen_movi_i64(t1, d2 & mask);
2968 } else {
2969 tcg_gen_addi_i64(t1, regs[b2], d2);
2970 tcg_gen_andi_i64(t1, t1, mask);
2971 }
2972 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
2973 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
2974 tcg_temp_free_i64(t1);
2975
2976 /* Then install the new FPC to set the rounding mode in fpu_status. */
2977 gen_helper_sfpc(cpu_env, t2);
2978 tcg_temp_free_i64(t2);
2979 return NO_EXIT;
2980 }
2981
2982 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS (privileged): bits 56-59 of the operand
   address become the PSW key.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED (privileged).  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK (privileged): replace PSW bits 0-7 from storage.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3004
/* STORE CPU ADDRESS (privileged).  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* STORE CLOCK: read the TOD clock via helper.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3022
/* STORE CLOCK EXTENDED: store the clock as a 16-byte value at the
   second-operand address.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3044
/* SET CLOCK COMPARATOR (privileged).  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR (privileged).  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
3058
/* STORE CONTROL (64-bit, privileged): store control registers r1..r3.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit, privileged).  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3082
/* STORE CPU ID (privileged): cpu_num stands in for the full CPU id.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* SET CPU TIMER (privileged).  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
3096
/* STORE FACILITY LIST (privileged): store a fixed facility word at
   real address 200 (the architected STFL location).  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}
3110
/* STORE CPU TIMER (privileged).  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE SYSTEM INFORMATION (privileged): function code and selectors
   come from r0/r1.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX (privileged).  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
3133
/* Channel I/O instructions (privileged): no channel subsystem is
   modelled, so report "not operational" via CC 3.  */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational.  */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}

/* STORE PREFIX (privileged): prefix is kept 8K-aligned via the mask.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
3149
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac / STOSM, privileged):
   store the current system mask byte, then AND or OR the immediate
   into PSW bits 0-7.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3173
/* STORE USING REAL ADDRESS (privileged).  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3181 #endif
3182
/* Store the low 8 bits of in1 at the address in in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low 16 bits of in1 at the address in in2.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store the low 32 bits of in1 at the address in in2.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Store all 64 bits of in1 at the address in in2.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3206
/* STORE ACCESS MULTIPLE: helper stores access registers r1..r3.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3217
/* STORE CHARACTERS UNDER MASK (STCM/STCMH/STCMY): store the bytes of
   in1 selected by mask m3; insn->data gives the bit position of the
   mask's word within the register (0 for low word, 32 for high).
   Contiguous masks become a single wider store; others fall back to
   byte-at-a-time stores.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* pos = shift that brings the lowest selected byte to bit 0.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3266
3267 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3268 {
3269 int r1 = get_field(s->fields, r1);
3270 int r3 = get_field(s->fields, r3);
3271 int size = s->insn->data;
3272 TCGv_i64 tsize = tcg_const_i64(size);
3273
3274 while (1) {
3275 if (size == 8) {
3276 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3277 } else {
3278 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3279 }
3280 if (r1 == r3) {
3281 break;
3282 }
3283 tcg_gen_add_i64(o->in2, o->in2, tsize);
3284 r1 = (r1 + 1) & 15;
3285 }
3286
3287 tcg_temp_free_i64(tsize);
3288 return NO_EXIT;
3289 }
3290
3291 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3292 {
3293 int r1 = get_field(s->fields, r1);
3294 int r3 = get_field(s->fields, r3);
3295 TCGv_i64 t = tcg_temp_new_i64();
3296 TCGv_i64 t4 = tcg_const_i64(4);
3297 TCGv_i64 t32 = tcg_const_i64(32);
3298
3299 while (1) {
3300 tcg_gen_shl_i64(t, regs[r1], t32);
3301 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3302 if (r1 == r3) {
3303 break;
3304 }
3305 tcg_gen_add_i64(o->in2, o->in2, t4);
3306 r1 = (r1 + 1) & 15;
3307 }
3308
3309 tcg_temp_free_i64(t);
3310 tcg_temp_free_i64(t4);
3311 tcg_temp_free_i64(t32);
3312 return NO_EXIT;
3313 }
3314
/* SEARCH STRING: r0 holds the character sought; the helper returns the
   updated first-operand address in o->in1 and the second operand via
   the low-128 return slot.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* SUBTRACT (integer).  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3329
/* SUBTRACT WITH BORROW: computed as in1 + ~in2 + !borrow.  The borrow
   is recovered from the previous CC: for subtractions CC 0/1 mean
   borrow and CC 2/3 mean no borrow, so (cc >> 1) is exactly the
   carry-in needed.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3347
/* SUPERVISOR CALL: record the SVC number and instruction length for
   the interrupt handler, then raise the exception.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The PSW and CC must be up to date before taking the exception.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3366
/* TEST DATA CLASS (short BFP): in2 is the class mask.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (long BFP).  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (extended BFP): 128-bit value in out/out2.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3387
3388 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: helper probes the page and yields the CC.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3396 #endif
3397
/* TRANSLATE: replace l1+1 bytes via the translation table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK: expand packed-decimal digits; all in the helper.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3416
/* EXCLUSIVE OR (character): storage-to-storage XOR of l1+1 bytes.
   When both operands are the same location the result is all zeroes,
   which we can emit inline as stores of zero; otherwise defer to the
   helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* Emit progressively narrower zero-stores to cover l bytes.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
3470
/* EXCLUSIVE OR (register/memory).  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* EXCLUSIVE OR IMMEDIATE (XIHF etc.): insn->data packs the field width
   (high byte) and bit offset (low byte) of the field being XORed.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* Align the immediate with its target field before XORing.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3492
/* Produce a zero output (used e.g. for LZER/LZDR).  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a 128-bit zero output: both halves share one temp, so mark
   out2 as global-ish (g_out2) to suppress a double free.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3506
3507 /* ====================================================================== */
3508 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3509 the original inputs), update the various cc data structures in order to
3510 be able to compute the new condition code. */
3511
/* CC from a 32/64-bit absolute-value result.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed add: CC needs both inputs and the result for overflow detection.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* Unsigned (logical) add.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* Add with carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Signed compare: CC from in1 vs in2; no result value.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* Unsigned (logical) compare.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* CC from a float result (32/64/128-bit); f128 needs both halves.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* CC from a negated-absolute-value result.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* CC from a two's-complement (load complement) result.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Non-zero test.  The 32-bit variant must zero-extend first so that
   stale bits in the high half of the i64 do not affect the test.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Signed compare of the result against zero.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Signed subtract.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* Unsigned (logical) subtract.  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* Subtract with borrow.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* Test-under-mask: CC from in1 AND-tested against the mask in2.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3667
3668 /* ====================================================================== */
3669 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3670 with the TCG register to which we will write. Used in combination with
3671 the "wout" generators, in some cases we need a new temporary, and in
3672 some cases we can write to a TCG global. */
3673
/* Allocate a fresh temporary for the output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh temporary pair for a double-width output.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into general register r1 (a TCG global; g_out set so
   translate_one does not free it).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1/r1+1;
   SPEC enforces that r1 is even.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into float register r1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write directly into the 128-bit float register pair r1/r1+2;
   SPEC enforces a valid f128 register number (<= 13).  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3718
3719 /* ====================================================================== */
3720 /* The "Write OUTput" generators. These generally perform some non-trivial
3721 copy of data to TCG globals, or to main memory. The trivial cases are
3722 generally handled by having a "prep" generator install the TCG global
3723 as the destination of the operation. */
3724
/* Store the full 64-bit result into general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Deposit only the low 8 bits of the result into r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Deposit only the low 16 bits of the result into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the low 32 bits of the result into r1, preserving the high half.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store a 32-bit result pair into the even/odd pair r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd pair: low half to r1+1,
   high half to r1.  Note this clobbers o->out with the shift.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store a short (32-bit) float result into float register r1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a long (64-bit) float result into float register r1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
3779
3780 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3781 {
3782 int f1 = get_field(s->fields, r1);
3783 store_freg(f1, o->out);
3784 store_freg(f1 + 2, o->out2);
3785 }
3786 #define SPEC_wout_x1 SPEC_r1_f128
3787
/* Conditional 32-bit store: skip the write when r1 == r2
   (the operation would be a no-op move onto itself).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Conditional short-float store: skip when r1 == r2.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the result to memory at addr1, in various widths.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store 32 bits to the address computed into in2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0
3833
3834 /* ====================================================================== */
3835 /* The "INput 1" generators. These load the first operand to an insn. */
3836
/* Operand 1 = copy of general register r1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Operand 1 = register r1 itself ("_o" = original global, not freed).  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Operand 1 = low 32 bits of r1, sign-extended.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Operand 1 = low 32 bits of r1, zero-extended.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* Operand 1 = high 32 bits of r1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Operand 1 = copy of the odd register of the r1 pair (r1 must be even).  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

/* Operand 1 = low 32 bits of r1+1, sign-extended.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

/* Operand 1 = low 32 bits of r1+1, zero-extended.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Operand 1 = 64-bit value assembled from the 32-bit halves of the
   even/odd pair: high from r1, low from r1+1.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

/* Operand 1 = copy of general register r2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

/* Operand 1 = copy of general register r3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

/* Operand 1 = register r3 itself (global, not freed).  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

/* Operand 1 = low 32 bits of r3, sign-extended.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

/* Operand 1 = low 32 bits of r3, zero-extended.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* Operand 1 = short (32-bit) float from register r1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* Operand 1 = float register r1 itself (global, not freed).  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* NOTE(review): despite the in1_ name, this installs the r1 f128
   register pair into out/out2 -- apparently the convention for 128-bit
   FP insns whose first operand doubles as the destination (compare
   in2_x2_o, which fills in1/in2 from the r2 pair).  Confirm against
   the users in insn-data.def.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

/* Operand 1 = float register r3 itself (global, not freed).  */
static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Compute the first-operand effective address (base b1 + disp d1).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Compute the SECOND-operand effective address (x2 + b2 + d2), but
   store it as addr1 -- used by insns whose sole memory operand is
   encoded in the operand-2 fields.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Operand 1 loaded from memory at the operand-1 address, in various
   widths and extensions.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4021
4022 /* ====================================================================== */
4023 /* The "INput 2" generators. These load the second operand to an insn. */
4024
/* Operand 2 = register r1 itself ("_o" = original global, not freed).  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Operand 2 = low 16 bits of r1, zero-extended.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Operand 2 = low 32 bits of r1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Operand 2 = copy of general register r2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* Operand 2 = register r2 itself (global, not freed).  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Operand 2 = copy of r2, but only when r2 != 0; otherwise in2 is
   left unset (r2 == 0 means "no register" for these insns).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Operand 2 = low 8/16/32 bits of r2 with the indicated extension.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

/* Operand 2 = copy of general register r3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* Operand 2 = short (32-bit) float from register r2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Operand 2 = float register r2 itself (global, not freed).  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* The r2 f128 register pair fills BOTH in1 and in2 (one 128-bit
   operand); pairs with in1_x1_o which supplies out/out2.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Operand 2 = address formed from register r2 as a base (no index,
   no displacement), with address-mode masking applied.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Operand 2 = effective address x2 + b2 + d2 (x2 absent in some
   formats, in which case it contributes 0).  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* Operand 2 = PC-relative address: current PC + 2 * signed i2.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Operand 2 = shift count, masked to 5 or 6 bits by the helper.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

/* Operand 2 loaded from memory at the a2 effective address, in various
   widths and extensions.  The load overwrites the address in in2.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* Operand 2 loaded from memory at the PC-relative ri2 address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Operand 2 = the immediate field i2, optionally truncated to an
   unsigned width, and optionally shifted left by insn->data bits.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4276
4277 /* ====================================================================== */
4278
4279 /* Find opc within the table of insns. This is formulated as a switch
4280 statement so that (1) we get compile-time notice of cut-paste errors
4281 for duplicated opcodes, and (2) the compiler generates the binary
4282 search tree, rather than us having to post-process the table. */
4283
4284 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4285 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4286
4287 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4288
4289 enum DisasInsnEnum {
4290 #include "insn-data.def"
4291 };
4292
4293 #undef D
4294 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4295 .opc = OPC, \
4296 .fmt = FMT_##FT, \
4297 .fac = FAC_##FC, \
4298 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4299 .name = #NM, \
4300 .help_in1 = in1_##I1, \
4301 .help_in2 = in2_##I2, \
4302 .help_prep = prep_##P, \
4303 .help_wout = wout_##W, \
4304 .help_cout = cout_##CC, \
4305 .help_op = op_##OP, \
4306 .data = D \
4307 },
4308
4309 /* Allow 0 to be used for NULL in the table below. */
4310 #define in1_0 NULL
4311 #define in2_0 NULL
4312 #define prep_0 NULL
4313 #define wout_0 NULL
4314 #define cout_0 NULL
4315 #define op_0 NULL
4316
4317 #define SPEC_in1_0 0
4318 #define SPEC_in2_0 0
4319 #define SPEC_prep_0 0
4320 #define SPEC_wout_0 0
4321
4322 static const DisasInsn insn_info[] = {
4323 #include "insn-data.def"
4324 };
4325
4326 #undef D
4327 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4328 case OPC: return &insn_info[insn_ ## NM];
4329
/* Find the insn-data entry for OPC ((major << 8) | secondary opcode).
   The case labels come from re-expanding insn-data.def with the D macro
   defined just above; the compiler turns this into a search tree.
   Returns NULL for an unknown opcode.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}
4338
4339 #undef D
4340 #undef C
4341
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation. */
4345
4346 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4347 {
4348 uint32_t r, m;
4349
4350 if (f->size == 0) {
4351 return;
4352 }
4353
4354 /* Zero extract the field from the insn. */
4355 r = (insn << f->beg) >> (64 - f->size);
4356
4357 /* Sign-extend, or un-swap the field as necessary. */
4358 switch (f->type) {
4359 case 0: /* unsigned */
4360 break;
4361 case 1: /* signed */
4362 assert(f->size <= 32);
4363 m = 1u << (f->size - 1);
4364 r = (r ^ m) - m;
4365 break;
4366 case 2: /* dl+dh split, signed 20 bit. */
4367 r = ((int8_t)r << 12) | (r >> 8);
4368 break;
4369 default:
4370 abort();
4371 }
4372
4373 /* Validate that the "compressed" encoding we selected above is valid.
4374 I.e. we havn't make two different original fields overlap. */
4375 assert(((o->presentC >> f->indexC) & 1) == 0);
4376 o->presentC |= 1 << f->indexC;
4377 o->presentO |= 1 << f->indexO;
4378
4379 o->c[f->indexC] = r;
4380 }
4381
4382 /* Lookup the insn at the current PC, extracting the operands into O and
4383 returning the info struct for the insn. Returns NULL for invalid insn. */
4384
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for an invalid
   insn.  Also sets s->next_pc from the decoded instruction length.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first halfword's top byte determines the total length (2/4/6).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full instruction in the uint64_t, as expected by
       extract_field.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4470
/* Decode and translate a single instruction at s->pc, driving the
   in1/in2/prep/op/wout/cout helper pipeline from the insn table.
   Advances s->pc and returns the translator's exit status.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions.  These are encoded as
       SPEC_* bits by the per-operand helpers in the insn table.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional per the table.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers; the g_* flags mark
       TCG globals that must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4574
/*
 * Translate a block of guest instructions starting at tb->pc into TCG ops.
 *
 * If search_pc is non-zero, the gen_opc_* side tables (guest PC, cc_op
 * and instruction count per TCG op) are filled in so that a position in
 * the op stream can later be mapped back to guest state (see
 * restore_state_to_opc); otherwise tb->size and tb->icount are
 * finalized for the generated TB.
 */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;             /* lj: last gen_opc_* slot written */
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;   /* next TB cannot assume a known cc_op */
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Translation must not cross a guest page boundary (see loop exit). */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record guest state for the TCG ops emitted so far,
               zero-filling instr_start for any slots skipped since
               the previous instruction. */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        /* With icount, I/O is only allowed on the last instruction.  */
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* Stop translation before an instruction with a breakpoint on
           it, and arrange to raise EXCP_DEBUG at TB exit.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* PSW address already handled by the exiting code.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return. */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the remaining side-table slots.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4708
4709 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4710 {
4711 gen_intermediate_code_internal(env, tb, 0);
4712 }
4713
4714 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4715 {
4716 gen_intermediate_code_internal(env, tb, 1);
4717 }
4718
4719 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4720 {
4721 int cc_op;
4722 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4723 cc_op = gen_opc_cc_op[pc_pos];
4724 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4725 env->cc_op = cc_op;
4726 }
4727 }