]> git.proxmox.com Git - mirror_qemu.git/blob - target-s390x/translate.c
target-s390x: basic PER event handling
[mirror_qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
37
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
40
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44
45 #include "trace-tcg.h"
46
47
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
52
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t pc, next_pc;
58 enum cc_op cc_op;
59 bool singlestep_enabled;
60 };
61
62 /* Information carried about a condition to be evaluated. */
63 typedef struct {
64 TCGCond cond:8;
65 bool is_64;
66 bool g1;
67 bool g2;
68 union {
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
71 } u;
72 } DisasCompare;
73
74 #define DISAS_EXCP 4
75
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
79 #endif
80
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
82 {
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
86 }
87 }
88 return pc;
89 }
90
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
92 int flags)
93 {
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
96 int i;
97
98 if (env->cc_op > 3) {
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
101 } else {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
104 }
105
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
108 if ((i % 4) == 3) {
109 cpu_fprintf(f, "\n");
110 } else {
111 cpu_fprintf(f, " ");
112 }
113 }
114
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
117 if ((i % 4) == 3) {
118 cpu_fprintf(f, "\n");
119 } else {
120 cpu_fprintf(f, " ");
121 }
122 }
123
124 for (i = 0; i < 32; i++) {
125 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
126 env->vregs[i][0].ll, env->vregs[i][1].ll);
127 cpu_fprintf(f, (i % 2) ? " " : "\n");
128 }
129
130 #ifndef CONFIG_USER_ONLY
131 for (i = 0; i < 16; i++) {
132 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
133 if ((i % 4) == 3) {
134 cpu_fprintf(f, "\n");
135 } else {
136 cpu_fprintf(f, " ");
137 }
138 }
139 #endif
140
141 #ifdef DEBUG_INLINE_BRANCHES
142 for (i = 0; i < CC_OP_MAX; i++) {
143 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
144 inline_branch_miss[i], inline_branch_hit[i]);
145 }
146 #endif
147
148 cpu_fprintf(f, "\n");
149 }
150
/* TCG globals mirroring CPUS390XState fields; created once in
   s390x_translate_init().  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* Lazy condition-code state: the operation selector plus up to three
   operand values (source, destination, result).  */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Backing storage for the register names ("r15\0"/"f15\0" fit in 4).  */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* Per-generated-opcode cc_op values; presumably used to restore the cc
   state when retranslating at a mid-TB PC — usage not visible in this
   chunk, confirm against restore_state code.  */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
164
/* Register all TCG globals backing the architectural state.  Called
   once at CPU-class initialization, before any translation.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* The FP registers alias the first doubleword of the corresponding
       vector registers, hence the vregs[i][0].d offset.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
200
201 static TCGv_i64 load_reg(int reg)
202 {
203 TCGv_i64 r = tcg_temp_new_i64();
204 tcg_gen_mov_i64(r, regs[reg]);
205 return r;
206 }
207
208 static TCGv_i64 load_freg32_i64(int reg)
209 {
210 TCGv_i64 r = tcg_temp_new_i64();
211 tcg_gen_shri_i64(r, fregs[reg], 32);
212 return r;
213 }
214
/* Store all 64 bits of V into GPR REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
219
/* Store all 64 bits of V into FPR REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
224
/* Store the low 32 bits of V into GPR REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
230
/* Store the low 32 bits of V into the HIGH half of GPR REG, keeping
   the low half (high-word facility semantics).  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
235
/* Store the low 32 bits of V into the upper half of FPR REG (where
   short-format values live); the lower half is preserved.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
240
/* Fetch into DEST the low 64 bits of the last 128-bit helper result,
   which helpers leave in env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
245
/* Write the current translation PC back to the architectural PSW.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
251
/* Spill the compile-time cc_op into env->cc_op.  DYNAMIC and STATIC
   mean the env copy is already current, so nothing is emitted.  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
258
/* Synchronize PSW address and cc state before an operation that may
   fault, so a page-fault exception sees consistent CPU state.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
264
/* Fetch a 2-byte big-endian instruction unit at PC, zero-extended.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
269
/* Fetch a 4-byte big-endian instruction unit at PC, zero-extended
   (the uint32_t cast avoids sign extension of the signed load).  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
274
/* Map the PSW address-space-control bits (cached in tb->flags) to the
   MMU index used for data accesses.  */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Access-register mode is not supported here.  */
        tcg_abort();
        break;
    }
}
289
290 static void gen_exception(int excp)
291 {
292 TCGv_i32 tmp = tcg_const_i32(excp);
293 gen_helper_exception(cpu_env, tmp);
294 tcg_temp_free_i32(tmp);
295 }
296
/* Raise program exception CODE for the current instruction, recording
   the code and instruction length for the exception handler.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length in bytes, from the decoded boundaries.  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
320
/* Raise an operation (illegal opcode) program exception.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
325
326 static inline void gen_trap(DisasContext *s)
327 {
328 TCGv_i32 t;
329
330 /* Set DXC to 0xff. */
331 t = tcg_temp_new_i32();
332 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
333 tcg_gen_ori_i32(t, t, 0xff00);
334 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
335 tcg_temp_free_i32(t);
336
337 gen_program_exception(s, PGM_DATA);
338 }
339
#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception when the PSW problem-state
   bit (cached in tb->flags) is set.  */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
348
349 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
350 {
351 TCGv_i64 tmp = tcg_temp_new_i64();
352 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
353
354 /* Note that d2 is limited to 20 bits, signed. If we crop negative
355 displacements early we create larger immedate addends. */
356
357 /* Note that addi optimizes the imm==0 case. */
358 if (b2 && x2) {
359 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
360 tcg_gen_addi_i64(tmp, tmp, d2);
361 } else if (b2) {
362 tcg_gen_addi_i64(tmp, regs[b2], d2);
363 } else if (x2) {
364 tcg_gen_addi_i64(tmp, regs[x2], d2);
365 } else {
366 if (need_31) {
367 d2 &= 0x7fffffff;
368 need_31 = false;
369 }
370 tcg_gen_movi_i64(tmp, d2);
371 }
372 if (need_31) {
373 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
374 }
375
376 return tmp;
377 }
378
379 static inline bool live_cc_data(DisasContext *s)
380 {
381 return (s->cc_op != CC_OP_DYNAMIC
382 && s->cc_op != CC_OP_STATIC
383 && s->cc_op > 3);
384 }
385
/* Set the condition code to the compile-time constant VAL (0..3).  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    /* Tell TCG the old cc operands are dead.  */
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
395
/* Defer cc computation OP with a single operand, kept in cc_dst.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    /* cc_src/cc_vr are unused by 1-operand ops; mark them dead.  */
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
405
/* Defer cc computation OP with two operands, kept in cc_src/cc_dst.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    /* cc_vr is unused by 2-operand ops; mark it dead.  */
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
416
/* Defer cc computation OP with three operands (source, destination,
   result value), kept in cc_src/cc_dst/cc_vr.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
425
/* Set cc from a 64-bit value: 0 when zero, nonzero otherwise.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
430
/* Set cc from a short-format (32-bit) float result in VAL.  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
435
/* Set cc from a long-format (64-bit) float result in VAL.  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
440
/* Set cc from an extended-format (128-bit) float result in VH:VL.  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
445
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    /* The operand globals are no longer needed; mark them dead.  */
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
456
/* calculates cc into cc_op */
/* Materialize the lazily-tracked condition code: dispatch on the
   number of operands the cc op consumes and call the calc_cc helper
   (or emit a constant directly).  Afterwards s->cc_op is STATIC.  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First pass: allocate the temporaries each case needs.  A dummy
       zero fills unused helper arguments; 3-operand ops need none.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    /* Second pass: emit the actual computation.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
564
/* Whether a direct goto_tb to DEST is allowed: DEST must lie in one of
   the (up to two) pages the TB spans, and neither single-stepping, a
   trailing I/O instruction, nor PER tracing may be active.  */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO)
            && !(s->tb->flags & FLAG_MASK_PER));
}
574
/* Statistics hook: count a branch that fell back to the generic cc
   computation (no-op unless DEBUG_INLINE_BRANCHES is defined).  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
581
/* Statistics hook: count a branch compiled to an inline comparison
   (no-op unless DEBUG_INLINE_BRANCHES is defined).  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
588
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   Entries are duplicated in pairs because bit 0 of the mask (CC=3)
   is a don't-care: mask and mask|1 map to the same condition.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
601
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.
   Entries come in groups of four because mask bits 0-1 (CC=2, CC=3)
   are don't-cares here.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
614
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the cc op and
   mask admit a direct comparison on the tracked operands, that is
   used; otherwise the cc value is materialized and tested.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial always/never masks need no operands at all; cc_op is
       used as a harmless global placeholder for both.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already materialized in the cc_op global;
           pick the cheapest test of it for the given mask.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
905
906 static void free_compare(DisasCompare *c)
907 {
908 if (!c->g1) {
909 if (c->is_64) {
910 tcg_temp_free_i64(c->u.s64.a);
911 } else {
912 tcg_temp_free_i32(c->u.s32.a);
913 }
914 }
915 if (!c->g2) {
916 if (c->is_64) {
917 tcg_temp_free_i64(c->u.s64.b);
918 } else {
919 tcg_temp_free_i32(c->u.s32.b);
920 }
921 }
922 }
923
924 /* ====================================================================== */
925 /* Define the insn format enumeration. */
926 #define F0(N) FMT_##N,
927 #define F1(N, X1) F0(N)
928 #define F2(N, X1, X2) F0(N)
929 #define F3(N, X1, X2, X3) F0(N)
930 #define F4(N, X1, X2, X3, X4) F0(N)
931 #define F5(N, X1, X2, X3, X4, X5) F0(N)
932
933 typedef enum {
934 #include "insn-format.def"
935 } DisasFormat;
936
937 #undef F0
938 #undef F1
939 #undef F2
940 #undef F3
941 #undef F4
942 #undef F5
943
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* One enumerator per distinct instruction field name.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact slot assignment: fields that never co-occur in one format
   share a slot, so seven ints suffice for any instruction.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;          /* undecoded instruction bytes */
    unsigned op:8;              /* primary opcode */
    unsigned op2:8;             /* secondary opcode, where present */
    unsigned presentC:16;       /* bitmap of occupied compact slots */
    unsigned int presentO;      /* bitmap of present original fields */
    int c[NUM_C_FIELD];         /* extracted field values, by C index */
};
1012
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* True if original field C was decoded for this instruction.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Return the value of field O from its compact slot C; the field must
   be present (asserted).  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1028
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field in the insn */
    unsigned int size:8;        /* width in bits */
    unsigned int type:2;        /* 0=unsigned, 1=signed, 2=long-disp */
    unsigned int indexC:6;      /* compact storage slot */
    enum DisasFieldIndexO indexO:8;  /* original field identity */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* Field layout table, indexed by DisasFormat.  */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1082
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    /* g_* flags mark the corresponding operand as a TCG global that
       must not be freed after the insn.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* Architectural facility an instruction belongs to; used to reject
   insns the configured CPU model does not provide.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception sumilation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
} DisasFacility;

/* One decode-table entry: opcode, format, facility, operand-spec
   constraints and the helper pipeline used to emit the insn.  */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    DisasFacility fac:8;
    unsigned spec:8;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
1165
1166 /* ====================================================================== */
1167 /* Miscellaneous helpers, used by several operations. */
1168
1169 static void help_l2_shift(DisasContext *s, DisasFields *f,
1170 DisasOps *o, int mask)
1171 {
1172 int b2 = get_field(f, b2);
1173 int d2 = get_field(f, d2);
1174
1175 if (b2 == 0) {
1176 o->in2 = tcg_const_i64(d2 & mask);
1177 } else {
1178 o->in2 = get_address(s, 0, b2, d2);
1179 tcg_gen_andi_i64(o->in2, o->in2, mask);
1180 }
1181 }
1182
1183 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1184 {
1185 if (dest == s->next_pc) {
1186 return NO_EXIT;
1187 }
1188 if (use_goto_tb(s, dest)) {
1189 update_cc_op(s);
1190 tcg_gen_goto_tb(0);
1191 tcg_gen_movi_i64(psw_addr, dest);
1192 tcg_gen_exit_tb((uintptr_t)s->tb);
1193 return EXIT_GOTO_TB;
1194 } else {
1195 tcg_gen_movi_i64(psw_addr, dest);
1196 return EXIT_PC_UPDATED;
1197 }
1198 }
1199
/* Emit a conditional branch described by C.  The target is either
   PC-relative (IS_IMM, halfword offset IMM) or register-indirect
   (CDEST).  Degenerate conditions (never/always, branch-to-next,
   bcr %r0) are peeled off first; otherwise one of three code shapes
   is produced depending on which exits may use goto_tb chaining.
   C is consumed (freed) in all cases. */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so movcond_i64 can
               select between the two possible PSW addresses. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1324
1325 /* ====================================================================== */
1326 /* The operations. These perform the bulk of the work for any insn,
1327 usually after the operands have been loaded and output initialized. */
1328
/* LOAD POSITIVE (integer): out = |in2|, via movcond to avoid a branch. */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}

/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value. */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (long BFP): clear the sign bit of the 64-bit value. */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (extended BFP): clear the sign bit in the high half,
   copy the low half unchanged. */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* ADD (integer): out = in1 + in2. */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD WITH CARRY: out = in1 + in2 + carry-from-CC. */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}

/* ADD (short BFP): via helper, which also raises FP exceptions. */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (long BFP). */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (extended BFP): 128-bit operands in register pairs; the low
   half of the result comes back via return_low128. */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* AND: out = in1 & in2. */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1417
/* AND IMMEDIATE (NIHH/NIHL/...): and an immediate into one 16- or
   32-bit field of the register.  insn->data encodes (size << 8) | shift
   for the field being modified. */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Move the immediate into position and set all bits outside the
       field to 1, so the AND leaves the rest of the register intact. */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1434
/* BRANCH AND SAVE (register form): store the link information, then
   branch to the address in in2 unless it is register 0 (no branch). */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* BRANCH RELATIVE AND SAVE: store link info, branch to PC + 2*I2. */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* BRANCH ON CONDITION: evaluate mask M1 against the CC and branch,
   either PC-relative (I2 present) or to the address in in2. */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1462
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of R1 and
   branch if the result is nonzero. */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON COUNT HIGH: like op_bct32 but operates on the high
   32 bits of R1; always PC-relative. */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

/* BRANCH ON COUNT (64-bit): decrement all of R1 in place and branch
   if nonzero.  Compares the global register directly (g1 = true). */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1529
/* BRANCH ON INDEX (32-bit, BXH/BXLE): R1 += R3, then compare R1 with
   the comparand in R3|1.  insn->data selects LE (BXLE) vs GT (BXH). */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    /* For even R3, the comparand is the odd register of the pair. */
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): as op_bx32 on full registers.
   When R1 aliases the comparand register, snapshot it first. */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* The addition below would clobber the comparand: copy it. */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1581
/* COMPARE AND BRANCH (CRJ/CGRJ/CIJ/... family): compare in1 with in2
   using the relation encoded in M3 and branch.  insn->data selects the
   unsigned (CLRJ/...) variants.  The branch target is PC-relative (I4)
   or the address B4+D4. */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Indirect form: compute the branch address into o->out. */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1607
/* COMPARE (short BFP): helper computes the CC directly. */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (long BFP). */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (extended BFP): 128-bit operands in register pairs. */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1628
/* CONVERT TO FIXED, BFP -> signed int.  M3 carries the rounding mode.
   The CC is derived from the source value (NaN/overflow reporting). */

/* short BFP -> 32-bit signed. */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* long BFP -> 32-bit signed. */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* extended BFP -> 32-bit signed. */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* short BFP -> 64-bit signed. */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* long BFP -> 64-bit signed. */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* extended BFP -> 64-bit signed. */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1682
/* CONVERT TO LOGICAL, BFP -> unsigned int.  Same structure as the
   signed conversions above; M3 is the rounding mode. */

/* short BFP -> 32-bit unsigned. */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* long BFP -> 32-bit unsigned. */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* extended BFP -> 32-bit unsigned. */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* short BFP -> 64-bit unsigned. */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* long BFP -> 64-bit unsigned. */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* extended BFP -> 64-bit unsigned. */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1736
/* CONVERT FROM FIXED/LOGICAL, int -> BFP.  M3 is the rounding mode;
   no CC is set by these conversions. */

/* signed 64-bit -> short BFP. */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* signed 64-bit -> long BFP. */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* signed 64-bit -> extended BFP (low half via return_low128). */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

/* unsigned 64-bit -> short BFP. */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* unsigned 64-bit -> long BFP. */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* unsigned 64-bit -> extended BFP (low half via return_low128). */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1786
/* CHECKSUM: helper computes the checksum and the number of bytes
   consumed; afterwards advance the R2 address and shrink the R2+1
   length by that amount, as the architecture requires. */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1803
/* COMPARE LOGICAL (character): for lengths 1/2/4/8 inline the two
   loads and compare as unsigned; any other length goes through the
   byte-wise helper.  Note L1 holds length-1, hence the switch on l+1. */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: the helper computes the CC itself. */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1837
/* COMPARE LOGICAL LONG EXTENDED: fully delegated to the helper, which
   updates the register pairs and the CC. */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE LOGICAL CHARACTERS UNDER MASK: compare the register bytes
   selected by M3 against successive bytes in storage. */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* COMPARE LOGICAL STRING: helper scans until the terminator in R0;
   the updated second-operand address comes back via return_low128. */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1871
/* COPY SIGN: out = sign bit of in1 combined with magnitude of in2. */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
1881
/* COMPARE AND SWAP (CS/CSG): load, compare with R1, conditionally
   store R3, set CC.  Non-atomic with respect to other vCPUs. */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1931
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit compare-and-swap on the
   register pairs R1:R1+1 (expected) and R3:R3+1 (new).  Register
   write-back is deferred until all memory accesses have succeeded.
   Non-atomic with respect to other vCPUs. */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* Store new value on equality, write back what was read otherwise
       (the store itself is issued unconditionally, like op_cs). */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions.  */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1983
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE: privileged; helper does the CAS and
   the TLB purge, and produces the CC. */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
1995
/* CONVERT TO DECIMAL: convert the 32-bit value in in1 to packed
   decimal via helper and store the 8-byte result at address in2. */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2007
/* COMPARE AND TRAP: trap if the M3-selected relation between in1 and
   in2 holds.  The brcond uses the inverted condition so that the
   fall-through path is the no-trap case. */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        /* Unsigned (CLT/...) variant. */
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
2026
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypervisor call.  PSW address and CC are
   synchronized first because the helper may inspect or change them. */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
2046
/* DIVIDE, 32-bit signed: helper returns quotient:remainder packed in
   a 128-bit result; out gets one half via return_low128. */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL, 32-bit unsigned. */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE, 64-bit signed. */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL, 64-bit unsigned: 128-bit dividend in out:out2. */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE (short BFP). */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (long BFP). */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (extended BFP): 128-bit operands in register pairs. */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2093
/* EXTRACT ACCESS REGISTER: out = access register R2 (zero-extended). */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT CPU ATTRIBUTE: report "no information" for every query. */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}

/* EXTRACT FPC: out = the floating-point control register. */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2113
/* EXTRACT PSW: R1 = high 32 bits of the PSW mask; if R2 is nonzero,
   R2 = low 32 bits. */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
2131
/* EXECUTE: run the target instruction with its second byte or'd with
   bits of R1.  Implemented entirely in the helper; PSW and CC are
   synchronized first since the executed insn may depend on them. */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
2156
/* LOAD FP INTEGER (short BFP): round in2 to an integral value
   per rounding mode M3. */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* LOAD FP INTEGER (long BFP). */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* LOAD FP INTEGER (extended BFP): low half via return_low128. */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2181
/* FIND LEFTMOST ONE: R1 = bit position of the leftmost 1 (or 64 if
   none), R1+1 = input with that bit cleared. */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2201
2202 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2203 {
2204 int m3 = get_field(s->fields, m3);
2205 int pos, len, base = s->insn->data;
2206 TCGv_i64 tmp = tcg_temp_new_i64();
2207 uint64_t ccm;
2208
2209 switch (m3) {
2210 case 0xf:
2211 /* Effectively a 32-bit load. */
2212 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2213 len = 32;
2214 goto one_insert;
2215
2216 case 0xc:
2217 case 0x6:
2218 case 0x3:
2219 /* Effectively a 16-bit load. */
2220 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2221 len = 16;
2222 goto one_insert;
2223
2224 case 0x8:
2225 case 0x4:
2226 case 0x2:
2227 case 0x1:
2228 /* Effectively an 8-bit load. */
2229 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2230 len = 8;
2231 goto one_insert;
2232
2233 one_insert:
2234 pos = base + ctz32(m3) * 8;
2235 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2236 ccm = ((1ull << len) - 1) << pos;
2237 break;
2238
2239 default:
2240 /* This is going to be a sequence of loads and inserts. */
2241 pos = base + 32 - 8;
2242 ccm = 0;
2243 while (m3) {
2244 if (m3 & 0x8) {
2245 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2246 tcg_gen_addi_i64(o->in2, o->in2, 1);
2247 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2248 ccm |= 0xff << pos;
2249 }
2250 m3 = (m3 << 1) & 0xf;
2251 pos -= 8;
2252 }
2253 break;
2254 }
2255
2256 tcg_gen_movi_i64(tmp, ccm);
2257 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2258 tcg_temp_free_i64(tmp);
2259 return NO_EXIT;
2260 }
2261
/* INSERT IMMEDIATE (IIHH/IIHL/...): deposit in2 into the bit field of
   in1 described by insn->data = (size << 8) | shift. */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2269
/* INSERT PROGRAM MASK: build bits 32-39 of R1 from the program mask
   (PSW bits, placed at bits 24-27 of the byte) and the CC (bits 28-29),
   leaving the rest of the register intact. */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program mask from the PSW into the byte. */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Merge in the condition code. */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2288
2289 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY: privileged; handled entirely in the helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED: privileged; key lookup done in the helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2303 #endif
2304
/* Floating-point format-conversion wrappers.  Each defers to a helper so
   that rounding and exception flags are handled in fpu_status.  The 128-bit
   (extended) results come back as a high/low pair via return_low128.  */

/* LOAD LENGTHENED: short (32-bit) BFP to long (64-bit).  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED: long BFP to short.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED: extended (128-bit) BFP to long.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED: extended BFP to short.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD LENGTHENED: long BFP to extended.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LENGTHENED: short BFP to extended.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2342
/* LOAD LOGICAL THIRTY ONE BITS: keep only the low 31 bits of in2.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2348
/* Simple memory loads of various widths and signedness; o->in2 holds the
   effective address, get_mem_index selects the current address space.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2390
/* LOAD AND TRAP family: store/produce the value first, then raise a trap
   if that value is zero (the branch skips gen_trap when it is non-zero).  */

static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* As op_lat, but targeting the high 32 bits of the register.  */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* 31-bit logical variant: mask to 31 bits, then trap on zero.  */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
2445
/* LOAD ON CONDITION: out = (m3 condition holds) ? in2 : in1, expressed
   branch-free with movcond.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        /* 64-bit comparison: select directly.  */
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        /* 32-bit comparison: compute a 0/1 flag, widen it, and select
           on flag != 0.  */
        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2475
2476 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; control registers r1..r3 are loaded
   by the helper from the address in in2.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit variant, LCTLG).  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS: privileged; the helper performs the translation and
   the condition code is taken from it.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2508
/* LOAD PSW: load a 64-bit (short-format) PSW from memory.  Ends the TB
   since execution resumes at the new PSW address.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED: load a full 128-bit PSW (mask, then address).  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2544 #endif
2545
/* LOAD ACCESS MULTIPLE: access registers r1..r3 are loaded by the helper
   from the address in in2.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2556
2557 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2558 {
2559 int r1 = get_field(s->fields, r1);
2560 int r3 = get_field(s->fields, r3);
2561 TCGv_i64 t1, t2;
2562
2563 /* Only one register to read. */
2564 t1 = tcg_temp_new_i64();
2565 if (unlikely(r1 == r3)) {
2566 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2567 store_reg32_i64(r1, t1);
2568 tcg_temp_free(t1);
2569 return NO_EXIT;
2570 }
2571
2572 /* First load the values of the first and last registers to trigger
2573 possible page faults. */
2574 t2 = tcg_temp_new_i64();
2575 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2576 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2577 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2578 store_reg32_i64(r1, t1);
2579 store_reg32_i64(r3, t2);
2580
2581 /* Only two registers to read. */
2582 if (((r1 + 1) & 15) == r3) {
2583 tcg_temp_free(t2);
2584 tcg_temp_free(t1);
2585 return NO_EXIT;
2586 }
2587
2588 /* Then load the remaining registers. Page fault can't occur. */
2589 r3 = (r3 - 1) & 15;
2590 tcg_gen_movi_i64(t2, 4);
2591 while (r1 != r3) {
2592 r1 = (r1 + 1) & 15;
2593 tcg_gen_add_i64(o->in2, o->in2, t2);
2594 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2595 store_reg32_i64(r1, t1);
2596 }
2597 tcg_temp_free(t2);
2598 tcg_temp_free(t1);
2599
2600 return NO_EXIT;
2601 }
2602
2603 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2604 {
2605 int r1 = get_field(s->fields, r1);
2606 int r3 = get_field(s->fields, r3);
2607 TCGv_i64 t1, t2;
2608
2609 /* Only one register to read. */
2610 t1 = tcg_temp_new_i64();
2611 if (unlikely(r1 == r3)) {
2612 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2613 store_reg32h_i64(r1, t1);
2614 tcg_temp_free(t1);
2615 return NO_EXIT;
2616 }
2617
2618 /* First load the values of the first and last registers to trigger
2619 possible page faults. */
2620 t2 = tcg_temp_new_i64();
2621 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2622 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2623 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2624 store_reg32h_i64(r1, t1);
2625 store_reg32h_i64(r3, t2);
2626
2627 /* Only two registers to read. */
2628 if (((r1 + 1) & 15) == r3) {
2629 tcg_temp_free(t2);
2630 tcg_temp_free(t1);
2631 return NO_EXIT;
2632 }
2633
2634 /* Then load the remaining registers. Page fault can't occur. */
2635 r3 = (r3 - 1) & 15;
2636 tcg_gen_movi_i64(t2, 4);
2637 while (r1 != r3) {
2638 r1 = (r1 + 1) & 15;
2639 tcg_gen_add_i64(o->in2, o->in2, t2);
2640 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2641 store_reg32h_i64(r1, t1);
2642 }
2643 tcg_temp_free(t2);
2644 tcg_temp_free(t1);
2645
2646 return NO_EXIT;
2647 }
2648
2649 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2650 {
2651 int r1 = get_field(s->fields, r1);
2652 int r3 = get_field(s->fields, r3);
2653 TCGv_i64 t1, t2;
2654
2655 /* Only one register to read. */
2656 if (unlikely(r1 == r3)) {
2657 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2658 return NO_EXIT;
2659 }
2660
2661 /* First load the values of the first and last registers to trigger
2662 possible page faults. */
2663 t1 = tcg_temp_new_i64();
2664 t2 = tcg_temp_new_i64();
2665 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2666 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2667 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2668 tcg_gen_mov_i64(regs[r1], t1);
2669 tcg_temp_free(t2);
2670
2671 /* Only two registers to read. */
2672 if (((r1 + 1) & 15) == r3) {
2673 tcg_temp_free(t1);
2674 return NO_EXIT;
2675 }
2676
2677 /* Then load the remaining registers. Page fault can't occur. */
2678 r3 = (r3 - 1) & 15;
2679 tcg_gen_movi_i64(t1, 8);
2680 while (r1 != r3) {
2681 r1 = (r1 + 1) & 15;
2682 tcg_gen_add_i64(o->in2, o->in2, t1);
2683 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2684 }
2685 tcg_temp_free(t1);
2686
2687 return NO_EXIT;
2688 }
2689
2690 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit): privileged; real-mode access is done
   in the helper.  */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD USING REAL ADDRESS (64-bit, LURAG).  */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2706 #endif
2707
/* Generic move: hand the in2 temporary over to out without copying,
   transferring ownership (and the "global" flag) so it is not freed as
   an input afterwards.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2716
/* Move with access-register update: like op_mov2, but additionally set
   access register 1 according to the current address-space control in the
   TB flags.  (All four ASC values of the 2-bit field are covered, so no
   default case is needed.)  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    /* Transfer ownership of in2 to out, as in op_mov2.  */
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        /* Copy the access register selected by b2; b2 == 0 means AR0,
           which reads as zero here.  */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
2751
/* 128-bit move: hand both input temporaries (high/low pair) over to the
   output pair without copying, transferring ownership as in op_mov2.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2763
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via the helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: register pairs r1/r2 describe the operands; the helper sets
   the condition code.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as MVCL, with pad character address in in2.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2796
2797 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-space move; length/key comes from the
   register selected by the l1 field.  CC is set by the helper.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: mirror of op_mvcp for the other direction.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2817 #endif
2818
/* MOVE PAGE: helper-implemented; r0 supplies the operand-access controls.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: r0 holds the terminating byte; the helper returns the
   updated first-operand address (into in1) and the updated second-operand
   address via the low-128 mechanism.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2835
/* MULTIPLY: low 64 bits of the product.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY LOGICAL: full 128-bit unsigned product into out (high) and
   out2 (low).  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}
2847
/* BFP multiply and multiply-and-add/subtract wrappers.  All rounding and
   exception behavior lives in the helpers; extended (128-bit) results are
   returned via return_low128.  */

/* MULTIPLY (short BFP).  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short * short -> long BFP).  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP).  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (extended BFP).  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (long * long -> extended BFP).  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3].  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (long BFP).  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (short BFP).  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (long BFP).  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2909
/* LOAD NEGATIVE (integer): out = -|in2|, i.e. negate when in2 >= 0,
   otherwise keep as-is.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}

/* LOAD NEGATIVE (short BFP): force the sign bit on.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (long BFP).  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (extended BFP): sign lives in the high half.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2940
/* AND (character): storage-to-storage AND of l1+1 bytes; CC from helper.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2950
/* LOAD COMPLEMENT (integer).  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (short BFP): flip the sign bit.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (long BFP).  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (extended BFP): sign lives in the high half.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2975
/* OR (character): storage-to-storage OR of l1+1 bytes; CC from helper.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* OR (register/register or register/memory form).  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2991
/* OR IMMEDIATE: OR a shifted immediate field into in1; the insn data
   encodes the field as shift (low byte) and size (high bits).  The CC is
   computed from only the bits that were touched.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3007
/* POPULATION COUNT: per-byte popcount done in the helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
3013
3014 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; helper flushes the translation lookaside buffer.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
3021 #endif
3022
/* ROTATE THEN INSERT SELECTED BITS: shared implementation for RISBG,
   RISBHG and RISBLG.  Rotate in2 left by i5, then insert the bit range
   selected by i3..i4 into out; i4 bit 0x80 requests zeroing the
   non-inserted bits.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
3105
/* ROTATE THEN <AND/OR/XOR> SELECTED BITS: shared implementation for
   RNSBG/ROSBG/RXSBG (selected by op2).  Rotate in2 by i5, combine the bit
   range i3..i4 into out, and set the CC from the selected bits; i3 bit
   0x80 makes this a test-only form that discards the result.  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        /* Force the unselected bits of in2 to 1 so they leave out alone.  */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3158
/* Byte-reverse (LOAD REVERSED) at 16/32/64-bit widths.  */

static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
3176
/* ROTATE LEFT SINGLE LOGICAL (32-bit): rotate in 32-bit space, then
   zero-extend the result back to 64 bits.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3197
3198 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; CC comes from the helper.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL (FAST): privileged.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
3214 #endif
3215
/* SET ADDRESSING MODE (SAM24/SAM31/SAM64, selected by insn data 0/1/3):
   update the AM bits of the PSW and truncate the next PC to the new
   addressable range.  */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    /* Address mask for checking/truncating the PC in the new mode.  */
    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    /* Write the two AM bits (PSW bits 31-32 of the mask register).  */
    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}
3250
/* SET ACCESS REGISTER: store the low 32 bits of in2 into AR r1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
3257
/* BFP subtract and square-root wrappers; rounding and exceptions handled
   in the helpers, 128-bit results via return_low128.  */

/* SUBTRACT (short BFP).  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (long BFP).  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (extended BFP).  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT (short BFP).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (long BFP).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (extended BFP).  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3295
3296 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; CC from the helper.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged inter-CPU signaling; CC from the helper.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3315 #endif
3316
/* STORE ON CONDITION: store r1 (32- or 64-bit, per insn data) only when
   the m3 condition holds; implemented by branching around the store.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    /* insn data distinguishes STOCG (64-bit) from STOC (32-bit).  */
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
3350
/* SHIFT LEFT SINGLE (arithmetic): insn data holds the sign-bit position
   (31 or 63).  CC is computed from the pre-shift operands.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3382
/* SET FPC: install a new floating-point control register via the helper.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: like SFPC but may raise simulated IEEE exceptions.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3394
/* SET ROUNDING MODE family (SRNM/SRNMB/SRNMT, selected by op2): insert
   the b2+d2 value into the appropriate rounding-mode field of the FPC,
   then reinstall the FPC so fpu_status picks up the new mode.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    /* Position and width of the FPC field written by each variant.  */
    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        /* No base register: the displacement alone is the value.  */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3434
3435 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; bits 4-7 of in2 become the PSW
   access key.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged; done in the helper.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace the high byte of the PSW mask.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3457
/* STORE CPU ADDRESS: out receives env->cpu_num; the "wout" hook
   performs the actual half-word store. */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3467
/* STORE CLOCK: helper computes the 64-bit TOD value into out. */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: store the TOD as a 16-byte value at in2. */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3497
/* SET CLOCK COMPARATOR from in2. */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR into out (stored by the wout hook). */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit, STCTG): helper stores control registers
   r1..r3 at the address in in2; it may touch memory and fault. */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit, STCTL): as STCTG but 32-bit slots. */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3535
/* STORE CPU ID: build the doubleword from env->cpu_num (low 32 bits)
   with env->machine_type deposited into the high 32 bits. */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}

/* SET CPU TIMER from in2. */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
3555
/* STORE FACILITY LIST.  NOTE(review): the facility word 0xc0000000 is
   hard-coded, and 200 is presumably the lowcore stfl facility-list
   offset — confirm against the lowcore layout.  The store also goes
   through the current translation mem index rather than real storage. */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}
3569
/* STORE CPU TIMER into out (stored by the wout hook). */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE SYSTEM INFORMATION: implicit operands in r0/r1; the helper
   returns the condition code, installed via set_cc_static. */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX from in2. */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
3592
/* Channel-subsystem instructions.  All follow the same pattern:
   privileged, the subchannel id (where needed) is implicit in r1,
   the helper does the work (and may fault), and the resulting
   condition code is installed with set_cc_static. */

/* CANCEL SUBCHANNEL. */
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* CLEAR SUBCHANNEL. */
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* HALT SUBCHANNEL. */
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* MODIFY SUBCHANNEL: in2 addresses the SCHIB. */
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESET CHANNEL PATH. */
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESUME SUBCHANNEL. */
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* START SUBCHANNEL: in2 addresses the ORB. */
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STORE SUBCHANNEL: in2 addresses the destination SCHIB. */
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST SUBCHANNEL: in2 addresses the destination IRB. */
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* CHANNEL SUBSYSTEM CALL: in2 addresses the command block. */
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3682
/* STORE PREFIX: read env->psa, masked to the architected prefix bits. */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}

/* STNSM/STOSM: store the current system mask byte, THEN and/or the
   immediate into PSW bits 0-7.  op 0xac selects STNSM (AND), anything
   else here is STOSM (OR). */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3714
/* STORE USING REAL ADDRESS (32-bit): helper stores in1 at real
   address in2, bypassing DAT. */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (64-bit, STURG). */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3730 #endif
3731
/* Generic stores: write in1 to the address in in2, at the width the
   name indicates.  Operand extraction is done by the in/out hooks. */

static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3755
/* STORE ACCESS MULTIPLE: helper stores access registers r1..r3 at the
   address in in2; may touch memory and fault. */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3766
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by the
   m3 mask to successive bytes at in2.  insn->data holds the bit base
   (selects which word of the register for STCM/STCMH/STCMY). */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* For the contiguous-mask fast paths, pos is the bit offset of the
       least-significant selected byte. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        /* Walk mask bits most-significant first; each selected byte is
           stored and the destination address advanced by one. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3815
/* STORE MULTIPLE (STM/STMG): store registers r1..r3 (wrapping mod 16)
   at in2; insn->data is the per-register size (4 or 8). */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3839
3840 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3841 {
3842 int r1 = get_field(s->fields, r1);
3843 int r3 = get_field(s->fields, r3);
3844 TCGv_i64 t = tcg_temp_new_i64();
3845 TCGv_i64 t4 = tcg_const_i64(4);
3846 TCGv_i64 t32 = tcg_const_i64(32);
3847
3848 while (1) {
3849 tcg_gen_shl_i64(t, regs[r1], t32);
3850 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3851 if (r1 == r3) {
3852 break;
3853 }
3854 tcg_gen_add_i64(o->in2, o->in2, t4);
3855 r1 = (r1 + 1) & 15;
3856 }
3857
3858 tcg_temp_free_i64(t);
3859 tcg_temp_free_i64(t4);
3860 tcg_temp_free_i64(t32);
3861 return NO_EXIT;
3862 }
3863
/* SEARCH STRING: helper scans for the byte in r0; returns the updated
   r1 via out and r2 via the low-128 return slot; sets cc. */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* SUBTRACT: out = in1 - in2; cc handled by the cout hook. */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where borrow is
   derived from the current condition code. */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the flag in i32, then widen. */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
3904
/* SUPERVISOR CALL: record the SVC code and instruction length in env,
   then raise EXCP_SVC.  Never returns to the fallthrough path. */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Make psw.addr and cc state architecturally current before the
       exception is delivered. */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3923
/* TEST DATA CLASS (short BFP): helper sets cc from in1 vs mask in2. */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (long BFP). */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (extended BFP): the 128-bit value arrives in
   out/out2 (see in1_x1_o). */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3944
3945 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: helper checks access to addr1 with key in2; cc
   carries the result. */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3953 #endif
3954
/* TRANSLATE: replace l1+1 bytes at addr1 via the table at in2. */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE EXTENDED: helper updates the r1/r1+1 pair (out/out2) and
   sets cc; the second register comes back via the low-128 slot. */
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST: helper scans l1+1 bytes at addr1 against the
   function table at in2, setting cc. */
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK: convert packed decimal at in2 to zoned at addr1. */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3992
/* EXCLUSIVE OR (character): xor l1+1 bytes at (b2,d2) into (b1,d1).
   When both operands are the same location, x XOR x == 0, so this is
   the classic idiom for clearing storage — handled inline below. */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l is a length-minus-one field; emit the widest stores that
           fit, advancing the address between them. */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* XOR of equal operands is zero; cc is always 0. */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
4046
/* EXCLUSIVE OR (register form): out = in1 ^ in2. */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* XOR immediate into a sub-field of in1.  insn->data encodes the
   field: low byte = bit shift, next byte = field width in bits. */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* Produce a zero output (used by insns whose result is constant 0). */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a zero output pair; out2 aliases out, marked global so the
   common cleanup code does not free it twice. */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
4082
4083 /* ====================================================================== */
4084 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4085 the original inputs), update the various cc data structures in order to
4086 be able to compute the new condition code. */
4087
/* Each cout_* records the operands and a CC_OP_* method so the
   condition code can be computed lazily later. */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits of the result are significant here. */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
4243
4244 /* ====================================================================== */
4245 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4246 with the TCG register to which we will write. Used in combination with
4247 the "wout" generators, in some cases we need a new temporary, and in
4248 some cases we can write to a TCG global. */
4249
/* Fresh temporary for a single result. */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Fresh temporary pair for a double-width result. */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into GPR r1; g_out marks it as a global so it is
   not freed by the common cleanup code. */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write into the even/odd GPR pair r1/r1+1. */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into FPR r1. */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write into the 128-bit FP register pair r1/r1+2. */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
4294
4295 /* ====================================================================== */
4296 /* The "Write OUTput" generators. These generally perform some non-trivial
4297 copy of data to TCG globals, or to main memory. The trivial cases are
4298 generally handled by having a "prep" generator install the TCG global
4299 as the destination of the operation. */
4300
/* Store the full 64-bit result into GPR r1. */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into GPR r1. */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into GPR r1. */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the result into the low 32 bits of GPR r1. */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store the result into the high 32 bits of GPR r1. */
static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store the 32-bit pair out/out2 into the even/odd GPRs r1/r1+1. */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd pair: low 32 bits to
   r1+1, high 32 bits to r1.  Note this clobbers o->out. */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store a short (32-bit) FP result into FPR r1. */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a long (64-bit) FP result into FPR r1. */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
4361
4362 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4363 {
4364 int f1 = get_field(s->fields, r1);
4365 store_freg(f1, o->out);
4366 store_freg(f1 + 2, o->out2);
4367 }
4368 #define SPEC_wout_x1 SPEC_r1_f128
4369
/* Write the 32-bit result only when r1 and r2 differ (r1 == r2 means
   the destination already holds the value). */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Same conditional write for a short FP result. */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the result to memory at addr1, at the named width. */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store the result to memory at the address in in2. */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Store to memory and write the old value (in2) back to r1 —
   used by the load-and-store-atomically patterns. */
static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_32_r1_atomic 0

static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_64_r1_atomic 0
4431
4432 /* ====================================================================== */
4433 /* The "INput 1" generators. These load the first operand to an insn. */
4434
4435 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4436 {
4437 o->in1 = load_reg(get_field(f, r1));
4438 }
4439 #define SPEC_in1_r1 0
4440
4441 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4442 {
4443 o->in1 = regs[get_field(f, r1)];
4444 o->g_in1 = true;
4445 }
4446 #define SPEC_in1_r1_o 0
4447
4448 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4449 {
4450 o->in1 = tcg_temp_new_i64();
4451 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4452 }
4453 #define SPEC_in1_r1_32s 0
4454
4455 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4456 {
4457 o->in1 = tcg_temp_new_i64();
4458 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4459 }
4460 #define SPEC_in1_r1_32u 0
4461
4462 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4463 {
4464 o->in1 = tcg_temp_new_i64();
4465 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4466 }
4467 #define SPEC_in1_r1_sr32 0
4468
4469 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4470 {
4471 o->in1 = load_reg(get_field(f, r1) + 1);
4472 }
4473 #define SPEC_in1_r1p1 SPEC_r1_even
4474
4475 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4476 {
4477 o->in1 = tcg_temp_new_i64();
4478 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4479 }
4480 #define SPEC_in1_r1p1_32s SPEC_r1_even
4481
4482 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4483 {
4484 o->in1 = tcg_temp_new_i64();
4485 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4486 }
4487 #define SPEC_in1_r1p1_32u SPEC_r1_even
4488
4489 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4490 {
4491 int r1 = get_field(f, r1);
4492 o->in1 = tcg_temp_new_i64();
4493 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4494 }
4495 #define SPEC_in1_r1_D32 SPEC_r1_even
4496
4497 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4498 {
4499 o->in1 = load_reg(get_field(f, r2));
4500 }
4501 #define SPEC_in1_r2 0
4502
4503 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4504 {
4505 o->in1 = tcg_temp_new_i64();
4506 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4507 }
4508 #define SPEC_in1_r2_sr32 0
4509
4510 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4511 {
4512 o->in1 = load_reg(get_field(f, r3));
4513 }
4514 #define SPEC_in1_r3 0
4515
4516 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4517 {
4518 o->in1 = regs[get_field(f, r3)];
4519 o->g_in1 = true;
4520 }
4521 #define SPEC_in1_r3_o 0
4522
4523 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4524 {
4525 o->in1 = tcg_temp_new_i64();
4526 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4527 }
4528 #define SPEC_in1_r3_32s 0
4529
4530 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4531 {
4532 o->in1 = tcg_temp_new_i64();
4533 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4534 }
4535 #define SPEC_in1_r3_32u 0
4536
4537 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4538 {
4539 int r3 = get_field(f, r3);
4540 o->in1 = tcg_temp_new_i64();
4541 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4542 }
4543 #define SPEC_in1_r3_D32 SPEC_r3_even
4544
/* in1 = the short (32-bit) FP register r1, widened to an i64
   via load_freg32_i64.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* in1 = FP register r1 itself (global, not freed).  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* 128-bit FP operand in the register pair r1/r1+2.  NOTE(review):
   this fills out/out2 rather than in1/in2 -- presumably because the
   x1 operand is both source and destination for the insns using this
   helper; confirm against its users in insn-data.def.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

/* in1 = FP register r3 itself (global, not freed).  */
static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0
4573
/* addr1 = effective address computed from base b1 + displacement d1
   (no index register in these formats).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* addr1 = effective address from x2 + b2 + d2; the index field x2
   is only present in some instruction formats.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0
4586
/* in1 = zero-extended 8-bit load from the first-operand address.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

/* in1 = sign-extended 16-bit load from the first-operand address.  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

/* in1 = zero-extended 16-bit load from the first-operand address.  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

/* in1 = sign-extended 32-bit load from the first-operand address.  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

/* in1 = zero-extended 32-bit load from the first-operand address.  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

/* in1 = 64-bit load from the first-operand address.  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4634
4635 /* ====================================================================== */
4636 /* The "INput 2" generators. These load the second operand to an insn. */
4637
/* in2 = GPR r1 itself (global, not freed).  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* in2 = low 16 bits of GPR r1, zero-extended.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* in2 = low 32 bits of GPR r1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* in2 = 64-bit value from the even/odd pair: r1 supplies the high
   32 bits, r1+1 the low 32 bits.  Requires r1 even.  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

/* in2 = 64-bit copy of GPR r2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* in2 = GPR r2 itself (global, not freed).  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* in2 = copy of GPR r2, but only when r2 is nonzero; otherwise in2
   is left unset (r2 == 0 acts as "no register operand" here).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* in2 = low 8 bits of GPR r2, sign-extended.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

/* in2 = low 8 bits of GPR r2, zero-extended.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

/* in2 = low 16 bits of GPR r2, sign-extended.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

/* in2 = low 16 bits of GPR r2, zero-extended.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

/* in2 = 64-bit copy of GPR r3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* in2 = high 32 bits of GPR r3, shifted down into bits 0-31.  */
static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

/* in2 = low 32 bits of GPR r2, sign-extended.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

/* in2 = low 32 bits of GPR r2, zero-extended.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* in2 = high 32 bits of GPR r2, shifted down into bits 0-31.  */
static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
4750
/* in2 = the short (32-bit) FP register r2, widened to an i64
   via load_freg32_i64.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* in2 = FP register r2 itself (global, not freed).  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit FP operand in the register pair r2/r2+2, delivered as the
   in1/in2 pair of globals.  Requires a valid f128 register pair.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128
4772
/* in2 = address formed from GPR r2 used as a base (no index, no
   displacement); get_address applies the current addressing mode.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* in2 = effective address from x2 + b2 + d2; the index field x2 is
   only present in some instruction formats.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* in2 = PC-relative address: current insn address plus the signed
   halfword offset i2 (i2 counts halfwords, hence * 2).  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* in2 = shift count for a 32-bit shift, masked to 0-31.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

/* in2 = shift count for a 64-bit shift, masked to 0-63.  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
4803
/* The in2_m2_* helpers compute the second-operand address with
   in2_a2 and then load through it, reusing the address temporary
   in o->in2 as the load destination.  */

/* in2 = zero-extended 8-bit load from the second-operand address.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

/* in2 = sign-extended 16-bit load from the second-operand address.  */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

/* in2 = zero-extended 16-bit load from the second-operand address.  */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

/* in2 = sign-extended 32-bit load from the second-operand address.  */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

/* in2 = zero-extended 32-bit load from the second-operand address.  */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

/* in2 = 64-bit load from the second-operand address.  */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* The in2_mri2_* helpers load through a PC-relative address (in2_ri2)
   instead of a base/index/displacement address.  */

/* in2 = zero-extended 16-bit load from the PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

/* in2 = sign-extended 32-bit load from the PC-relative address.  */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

/* in2 = zero-extended 32-bit load from the PC-relative address.  */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

/* in2 = 64-bit load from the PC-relative address.  */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* "Atomic" variants keep the address in addr1 so the store side can
   reuse it; actual reservation of the address is not implemented.  */
static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_32s_atomic 0

static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_64_atomic 0
4891
/* in2 = the immediate field i2, sign-extended as extracted.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* in2 = low 8 bits of i2, zero-extended.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

/* in2 = low 16 bits of i2, zero-extended.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

/* in2 = low 32 bits of i2, zero-extended.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* in2 = 16-bit i2, zero-extended, shifted left by the per-insn
   amount stored in the table's data field.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* in2 = 32-bit i2, zero-extended, shifted left by the per-insn
   amount stored in the table's data field.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* in2 = the raw (left-aligned) instruction bytes themselves, taken
   from the extracted fields; system-mode only.  */
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
4937
4938 /* ====================================================================== */
4939
/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

/* C() is the common case of D() with a zero data field.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: build an enum assigning each
   insn a dense index (insn_NM) into insn_info[] below.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: build the DisasInsn descriptor for each insn,
   wiring the helper callbacks named by the I1/I2/P/W/OP/CC columns
   and OR-ing together their specification-exception requirements.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
4986
/* Third expansion of insn-data.def: one switch case per opcode,
   returning the corresponding insn_info[] entry.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a 16-bit (major << 8 | secondary) opcode to its descriptor,
   or NULL if the opcode is unknown.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
5002
5003 /* Extract a field from the insn. The INSN should be left-aligned in
5004 the uint64_t so that we can more easily utilize the big-bit-endian
5005 definitions we extract from the Principals of Operation. */
5006
5007 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5008 {
5009 uint32_t r, m;
5010
5011 if (f->size == 0) {
5012 return;
5013 }
5014
5015 /* Zero extract the field from the insn. */
5016 r = (insn << f->beg) >> (64 - f->size);
5017
5018 /* Sign-extend, or un-swap the field as necessary. */
5019 switch (f->type) {
5020 case 0: /* unsigned */
5021 break;
5022 case 1: /* signed */
5023 assert(f->size <= 32);
5024 m = 1u << (f->size - 1);
5025 r = (r ^ m) - m;
5026 break;
5027 case 2: /* dl+dh split, signed 20 bit. */
5028 r = ((int8_t)r << 12) | (r >> 8);
5029 break;
5030 default:
5031 abort();
5032 }
5033
5034 /* Validate that the "compressed" encoding we selected above is valid.
5035 I.e. we havn't make two different original fields overlap. */
5036 assert(((o->presentC >> f->indexC) & 1) == 0);
5037 o->presentC |= 1 << f->indexC;
5038 o->presentO |= 1 << f->indexO;
5039
5040 o->c[f->indexC] = r;
5041 }
5042
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes are enough to determine the total insn
       length from the major opcode.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the complete insn in INSN, reading the remaining
       bytes for the 4- and 6-byte formats.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte (bits 8-15).  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* 4-bit secondary opcode in bits 12-15.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
5132
/* Decode and translate a single instruction at s->pc: run the insn's
   input/prep/op/wout/cout helper pipeline, free the temporaries, and
   advance s->pc.  Returns the resulting ExitStatus.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions.  The SPEC_* bits were
       OR-ed into insn->spec from the operand helper requirements.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* Valid f128 register pairs are 0/2, 1/3, ... 13/15.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  Globals (g_*
       flags) belong to the translator and are never freed here.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc. */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
5257
/* Translate a block of guest code starting at tb->pc into TCG ops.
   When SEARCH_PC is true, additionally record per-op pc/cc metadata
   (gen_opc_*) so guest state can be restored mid-block.  */
static inline void gen_intermediate_code_internal(S390CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUS390XState *env = &cpu->env;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    /* Translation never crosses a guest page boundary.  */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start(tb);

    do {
        if (search_pc) {
            /* Record pc, cc_op and icount for the op about to be
               generated, zero-filling any skipped op slots.  */
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* Stop before translating an insn that has a breakpoint set.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    if (search_pc) {
        /* Zero-fill the metadata for any trailing op slots.  */
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
5390
/* Public entry point: translate a TB without per-op pc metadata.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}
5395
/* Public entry point: translate a TB recording per-op pc metadata,
   for use by restore_state_to_opc.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}
5400
5401 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
5402 {
5403 int cc_op;
5404 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
5405 cc_op = gen_opc_cc_op[pc_pos];
5406 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5407 env->cc_op = cc_op;
5408 }
5409 }