]> git.proxmox.com Git - mirror_qemu.git/blob - target-s390x/translate.c
Stop including qemu-common.h in memory.h
[mirror_qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
37
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
40
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44
45 #include "trace-tcg.h"
46
47
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
52
/* Per-translation-block state threaded through every decode/emit helper. */
struct DisasContext {
    struct TranslationBlock *tb;   /* TB being translated (flags, start pc) */
    const DisasInsn *insn;         /* decode-table entry for the current insn */
    DisasFields *fields;           /* decoded operand fields of the current insn */
    uint64_t pc, next_pc;          /* address of current insn and of its successor */
    enum cc_op cc_op;              /* translation-time view of the deferred cc computation */
    bool singlestep_enabled;       /* debugger single-stepping is active */
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;     /* TCG comparison to apply to a and b */
    bool is_64;         /* operands live in u.s64 (else u.s32) */
    bool g1;            /* a is a TCG global: do not free it */
    bool g2;            /* b is a TCG global: do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
73
74 #define DISAS_EXCP 4
75
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
79 #endif
80
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
82 {
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
86 }
87 }
88 return pc;
89 }
90
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
92 int flags)
93 {
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
96 int i;
97
98 if (env->cc_op > 3) {
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
101 } else {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
104 }
105
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
108 if ((i % 4) == 3) {
109 cpu_fprintf(f, "\n");
110 } else {
111 cpu_fprintf(f, " ");
112 }
113 }
114
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
117 if ((i % 4) == 3) {
118 cpu_fprintf(f, "\n");
119 } else {
120 cpu_fprintf(f, " ");
121 }
122 }
123
124 for (i = 0; i < 32; i++) {
125 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
126 env->vregs[i][0].ll, env->vregs[i][1].ll);
127 cpu_fprintf(f, (i % 2) ? " " : "\n");
128 }
129
130 #ifndef CONFIG_USER_ONLY
131 for (i = 0; i < 16; i++) {
132 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
133 if ((i % 4) == 3) {
134 cpu_fprintf(f, "\n");
135 } else {
136 cpu_fprintf(f, " ");
137 }
138 }
139 #endif
140
141 #ifdef DEBUG_INLINE_BRANCHES
142 for (i = 0; i < CC_OP_MAX; i++) {
143 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
144 inline_branch_miss[i], inline_branch_hit[i]);
145 }
146 #endif
147
148 cpu_fprintf(f, "\n");
149 }
150
/* TCG globals bound to fields of CPUS390XState in s390x_translate_init.  */
static TCGv_i64 psw_addr;   /* psw.addr */
static TCGv_i64 psw_mask;   /* psw.mask */
static TCGv_i64 gbea;       /* breaking-event address, written by the per_* helpers */

static TCGv_i32 cc_op;      /* kind (or constant value) of the deferred cc computation */
static TCGv_i64 cc_src;     /* up to three operands of that computation */
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];   /* backing store for "r0".."r15", "f0".."f15" */
static TCGv_i64 regs[16];           /* general registers */
static TCGv_i64 fregs[16];          /* floating-point registers */

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
165
/* Create the TCG globals declared above, binding each one to its slot
   in CPUS390XState. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(TCG_AREG0,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* FPR i overlays the first doubleword of vector register i.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
204
205 static TCGv_i64 load_reg(int reg)
206 {
207 TCGv_i64 r = tcg_temp_new_i64();
208 tcg_gen_mov_i64(r, regs[reg]);
209 return r;
210 }
211
212 static TCGv_i64 load_freg32_i64(int reg)
213 {
214 TCGv_i64 r = tcg_temp_new_i64();
215 tcg_gen_shri_i64(r, fregs[reg], 32);
216 return r;
217 }
218
/* Store all 64 bits of V into general register REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into floating-point register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

/* Store the low 32 bits of V into the low half of GPR REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the high half of GPR REG. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store a 32-bit value (low half of V) into the high half of FPR REG,
   preserving the low half. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low 64 bits of a 128-bit helper result from env->retxl. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
249
/* Synchronize the architected psw.addr with the translation-time pc. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Record an unconditionally taken branch for PER: gbea is set to the
   address of the branching instruction, and if PER is enabled in this
   TB, the per_branch helper is notified with the branch target (the
   next sequential insn when TO_NEXT, else the already-updated psw_addr).
   No-op in user-only builds. */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

/* As per_branch, but for a branch taken only when COND holds for
   ARG1/ARG2.  Without PER, gbea is still conditionally updated via a
   movcond so the breaking-event address stays accurate. */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the helper call when the branch is not taken. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

/* Note a breaking event at the current pc (e.g. before a goto_tb). */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}

/* Flush the translation-time cc_op to the cc_op global, unless the
   global already holds it (STATIC) or it is unknown (DYNAMIC). */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Bring psw.addr and the cc state up to date before emitting an
   operation that may fault, so the exception path sees correct state. */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
308
/* Fetch a 2-byte unit of the instruction stream at PC, zero-extended. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch a 4-byte unit of the instruction stream at PC, zero-extended. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Map the PSW address-space-control bits cached in tb->flags to the MMU
   index used for data accesses. */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Only the three ASC values above can appear in tb->flags. */
        tcg_abort();
        break;
    }
}
333
/* Emit a call to the exception helper with exception number EXCP. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

/* Raise program exception CODE: record the code and instruction length
   in env, advance the psw past the current instruction, flush the cc
   state, and trigger EXCP_PGM. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}

/* Operation exception, for illegal/unimplemented opcodes. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Data exception with DXC 0xff recorded in the FPC. */
static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}

#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception when translating in problem
   (user) state. */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
392
/* Compute the effective address base(b2) + index(x2) + displacement d2
   into a new temporary, masking to 31 bits when not in 64-bit mode. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends. */

    /* Note that addi optimizes the imm==0 case. */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        /* Pure displacement: fold the 31-bit masking into the constant. */
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
422
423 static inline bool live_cc_data(DisasContext *s)
424 {
425 return (s->cc_op != CC_OP_DYNAMIC
426 && s->cc_op != CC_OP_STATIC
427 && s->cc_op > 3);
428 }
429
/* Set the cc to the constant VAL (0..3), discarding any operands the
   previously deferred cc computation was keeping alive. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Defer a one-operand cc computation OP over DST. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a two-operand cc computation OP over SRC and DST. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a three-operand cc computation OP over SRC, DST and VR. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* cc = (VAL != 0), deferred. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc from a 32-bit float result, deferred. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc from a 64-bit float result, deferred. */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc from a 128-bit float result (high/low halves), deferred. */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
500
/* calculates cc into cc_op */
/* Materialize the deferred condition code into the cc_op global by
   calling the calc_cc helper with however many of cc_src/cc_dst/cc_vr
   the pending operation uses, then mark the cc as STATIC. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First switch: allocate the temporaries the helper call needs.
       The 3-argument ops need no dummy; everything else gets one. */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed at all. */
        break;
    }

    /* Second switch: emit the actual computation. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
608
/* Whether a direct (chained) goto_tb to DEST is allowed: the target must
   be within one of the guest pages this TB touches, and single-stepping,
   last-I/O instrumentation and PER must all be off. */
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO)
            && !(s->tb->flags & FLAG_MASK_PER));
}

/* Debug statistics: branch fell back to the generic cc computation. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Debug statistics: branch condition was folded into an inline compare. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
632
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   Indexed by the 4-bit branch mask: bit 8 selects cc=0 (EQ), bit 4
   cc=1 (LT), bit 2 cc=2 (GT); bit 1 (cc=3, "x") is ignored, hence the
   pairs of identical entries. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.
   Bits 2 and 1 of the mask are thus both don't-cares ("x"). */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
658
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.
   Where possible the deferred cc computation is folded directly into a
   compare on its operands (fast path); otherwise the cc is materialized
   via gen_op_calc_cc and tested against the mask. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* All mask bits set / none set: trivially always / never taken.
       Operands are irrelevant; point them at a global so nothing is
       allocated or freed. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC. We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (selected bits of dst under the src mask) against 0. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc is already materialized in the cc_op global; derive a
           compare against it from the mask. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
949
950 static void free_compare(DisasCompare *c)
951 {
952 if (!c->g1) {
953 if (c->is_64) {
954 tcg_temp_free_i64(c->u.s64.a);
955 } else {
956 tcg_temp_free_i32(c->u.s32.a);
957 }
958 }
959 if (!c->g2) {
960 if (c->is_64) {
961 tcg_temp_free_i64(c->u.s64.b);
962 } else {
963 tcg_temp_free_i32(c->u.s32.b);
964 }
965 }
966 }
967
/* ====================================================================== */
/* Define the insn format enumeration. */
/* One FMT_<N> enumerator per entry in insn-format.def; the F1..F5
   variants discard the field descriptions at this point (they are used
   again below to build format_info). */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
987
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps. */

/* "Original" indices: one distinct value per architectural field name,
   used as bit positions in DisasFields.presentO. */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* "Compact" slots: fields that never co-exist in a single format share
   one slot in DisasFields.c[]. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

/* Decoded form of a single instruction. */
struct DisasFields {
    uint64_t raw_insn;        /* the raw instruction bits */
    unsigned op:8;            /* opcode bytes; presumably filled in by the
                                 decoder (not in view) -- confirm there */
    unsigned op2:8;
    unsigned presentC:16;     /* bitmap of occupied c[] slots */
    unsigned int presentO;    /* bitmap over enum DisasFieldIndexO */
    int c[NUM_C_FIELD];       /* extracted field values, compact-indexed */
};

/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Test presence of original-index field C in F. */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch field value from its compact slot C, asserting that the
   original-index field O was actually decoded. */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1072
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;         /* bit offset of the field within the insn */
    unsigned int size:8;        /* field width in bits */
    unsigned int type:2;        /* extraction type code (0/1/2); interpreted
                                   by the field extractor, not in view */
    unsigned int indexC:6;      /* compact storage slot (enum DisasFieldIndexC) */
    enum DisasFieldIndexO indexO:8;  /* original field index */
} DisasField;

/* Field layout table for one instruction format. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1085
/* Field constructors used while expanding insn-format.def a second time:
   each produces one DisasField {beg, size, type, indexC, indexO}. */
#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* Per-format field layouts, indexed by DisasFormat. */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1126
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    /* g_* flags: by analogy with DisasCompare.g1/g2, presumably mark
       values that are TCG globals and must not be freed -- confirm
       against the wout/cout helpers (not in view). */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;   /* operand values */
    TCGv_i64 addr1;                 /* effective address operand */
} DisasOps;
1135
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>. */

/* Distinct bit flags (stored in DisasInsn.spec). */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
1147
/* Return values from translate_one, indicating the state of the TB.
   The values matter to the main translation loop's fixup logic. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
1165
/* Architecture facility an instruction belongs to; used to gate the
   availability of decode-table entries. */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
} DisasFacility;
1191
/* One decode-table entry: opcode, format, facility gate, operand
   constraints, and the set of helpers that implement the insn. */
struct DisasInsn {
    unsigned opc:16;        /* opcode (including extended opcode bits) */
    DisasFormat fmt:8;      /* instruction format (field layout) */
    DisasFacility fac:8;    /* facility required for this insn */
    unsigned spec:8;        /* SPEC_* operand-constraint flags */

    const char *name;

    /* Operand load/prepare/write-back/cc helpers; any may be NULL. */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* insn-specific constant passed to the helpers */
};
1209
1210 /* ====================================================================== */
1211 /* Miscellaneous helpers, used by several operations. */
1212
1213 static void help_l2_shift(DisasContext *s, DisasFields *f,
1214 DisasOps *o, int mask)
1215 {
1216 int b2 = get_field(f, b2);
1217 int d2 = get_field(f, d2);
1218
1219 if (b2 == 0) {
1220 o->in2 = tcg_const_i64(d2 & mask);
1221 } else {
1222 o->in2 = get_address(s, 0, b2, d2);
1223 tcg_gen_andi_i64(o->in2, o->in2, mask);
1224 }
1225 }
1226
/* Emit an unconditional branch to the known address DEST.  Picks the
   cheapest exit: fall through when DEST is the next insn, a chained
   goto_tb when allowed, otherwise a plain PC update.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        /* Branch-to-next: no code needed beyond the PER branch event.  */
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        /* CC state must be written back before leaving via goto_tb.  */
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}
1246
/* Emit a conditional branch.  C describes the condition (and is always
   consumed/freed here).  If IS_IMM, the target is PC-relative via IMM;
   otherwise it is the dynamic address in CDEST.  Four code shapes are
   generated depending on which exits may use goto_tb.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;    /* IMM is in halfword units */
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken: psw_addr already holds cdest in the dynamic
               case; only the immediate target still needs storing.  */
            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result so a single 64-bit
               movcond can select the next PC.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1377
1378 /* ====================================================================== */
1379 /* The operations. These perform the bulk of the work for any insn,
1380 usually after the operands have been loaded and output initialized. */
1381
1382 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1383 {
1384 TCGv_i64 z, n;
1385 z = tcg_const_i64(0);
1386 n = tcg_temp_new_i64();
1387 tcg_gen_neg_i64(n, o->in2);
1388 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1389 tcg_temp_free_i64(n);
1390 tcg_temp_free_i64(z);
1391 return NO_EXIT;
1392 }
1393
/* 32-bit float abs: clear the sign bit (bit 31 of the value, which is
   kept in the low half of the 64-bit register).  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
1399
/* 64-bit float abs: clear the sign bit (bit 63).  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
1405
/* 128-bit float abs: clear the sign bit in the high doubleword (in1)
   and pass the low doubleword (in2) through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
1412
/* Integer add: out = in1 + in2.  CC is handled by the cout hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1418
/* Add with carry: out = in1 + in2 + carry-in.  The carry-in is
   recovered from the current CC *before* this insn's own CC is set,
   so the order sum-first, then disas_jcc, is essential.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the 0/1 result to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
1445
/* Emit the aeb helper: out = helper_aeb(env, in1, in2).  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1451
/* Emit the adb helper: out = helper_adb(env, in1, in2).  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1457
/* 128-bit float add via helper; the helper returns the high half in
   o->out and the low half via the retxl slot (return_low128).  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1464
/* Bitwise AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1470
1471 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1472 {
1473 int shift = s->insn->data & 0xff;
1474 int size = s->insn->data >> 8;
1475 uint64_t mask = ((1ull << size) - 1) << shift;
1476
1477 assert(!o->g_in2);
1478 tcg_gen_shli_i64(o->in2, o->in2, shift);
1479 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1480 tcg_gen_and_i64(o->out, o->in1, o->in2);
1481
1482 /* Produce the CC from only the bits manipulated. */
1483 tcg_gen_andi_i64(cc_dst, o->out, mask);
1484 set_cc_nz_u64(s, cc_dst);
1485 return NO_EXIT;
1486 }
1487
1488 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1489 {
1490 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1491 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1492 tcg_gen_mov_i64(psw_addr, o->in2);
1493 per_branch(s, false);
1494 return EXIT_PC_UPDATED;
1495 } else {
1496 return NO_EXIT;
1497 }
1498 }
1499
/* Branch and save with immediate target: store link info, then branch
   PC-relative by 2*i2 halfwords.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1505
/* Branch on condition: translate the m1 mask into a comparison against
   the current CC and hand off to the common branch helper.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1516
/* Branch on count (32-bit): decrement the low half of R1 and branch
   while the decremented value is non-zero.  The register write happens
   before the compare value is captured, so the compare sees the new
   value as required.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1540
/* Branch on count, high-word variant: decrement the high 32 bits of R1
   and branch while non-zero.  Target is always an immediate here.  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    /* Extract the high word, decrement, and write it back.  */
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1564
/* Branch on count (64-bit): decrement R1 in place and branch while
   non-zero.  g1 is set since the compare operand is the global R1.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1583
/* Branch on index (32-bit, BXH/BXLE family): R1 += R3, then compare the
   sum against the comparand in R3|1.  insn->data selects LE vs GT.
   Note the comparand is captured before R1 is written back, which keeps
   the semantics correct when r1 == r3|1.  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1609
/* Branch on index (64-bit): R1 += R3, compare against R3|1.
   insn->data selects LE vs GT.  If r1 aliases the comparand register,
   a private copy of the old comparand is taken first so the compare
   uses the pre-update value.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* The add below would clobber the comparand: copy it first.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1635
/* Compare and branch: condition from the m3 mask via ltgt_cond,
   made unsigned when insn->data is set.  The target is either the i4
   immediate or a b4/d4 address computed into o->out.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1661
/* Float compare via helper; the helper returns the CC value.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1668
/* Float compare via helper; the helper returns the CC value.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1675
/* 128-bit float compare via helper (out/out2 and in1/in2 are the two
   doubleword halves of each operand); helper returns the CC value.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1682
/* Float-to-int conversion via helper; m3 supplies the rounding mode,
   and CC is derived from the unconverted source value.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1691
/* Float-to-int conversion via helper; m3 supplies the rounding mode,
   and CC is derived from the unconverted source value.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1700
/* 128-bit float-to-int conversion via helper; in1/in2 are the two
   source halves, m3 the rounding mode; CC from the source value.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1709
/* Float-to-int conversion via helper; m3 supplies the rounding mode,
   and CC is derived from the unconverted source value.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1718
/* Float-to-int conversion via helper; m3 supplies the rounding mode,
   and CC is derived from the unconverted source value.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1727
/* 128-bit float-to-int conversion via helper; in1/in2 are the two
   source halves, m3 the rounding mode; CC from the source value.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1736
/* Float-to-unsigned conversion via helper; m3 supplies the rounding
   mode, and CC is derived from the unconverted source value.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1745
/* Float-to-unsigned conversion via helper; m3 supplies the rounding
   mode, and CC is derived from the unconverted source value.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1754
/* 128-bit float-to-unsigned conversion via helper; in1/in2 are the two
   source halves, m3 the rounding mode; CC from the source value.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1763
/* Float-to-unsigned conversion via helper; m3 supplies the rounding
   mode, and CC is derived from the unconverted source value.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1772
/* Float-to-unsigned conversion via helper; m3 supplies the rounding
   mode, and CC is derived from the unconverted source value.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1781
/* 128-bit float-to-unsigned conversion via helper; in1/in2 are the two
   source halves, m3 the rounding mode; CC from the source value.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1790
/* Int-to-float conversion via helper; m3 supplies the rounding mode.
   No CC update in this path.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1798
/* Int-to-float conversion via helper; m3 supplies the rounding mode.
   No CC update in this path.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1806
/* Int-to-128-bit-float conversion via helper; the low result half is
   fetched with return_low128.  m3 supplies the rounding mode.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1815
/* Unsigned-to-float conversion via helper; m3 supplies the rounding
   mode.  No CC update in this path.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1823
/* Unsigned-to-float conversion via helper; m3 supplies the rounding
   mode.  No CC update in this path.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1831
/* Unsigned-to-128-bit-float conversion via helper; the low result half
   is fetched with return_low128.  m3 supplies the rounding mode.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1840
/* CHECKSUM via helper.  The helper returns the consumed byte count in
   LEN and the accumulator low half via return_low128; the R2/R2+1
   address/length pair is then advanced/decremented accordingly.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1857
/* COMPARE LOGICAL (character).  The l1 field encodes length-1, hence
   the switch on l + 1.  Power-of-two lengths are inlined as a pair of
   zero-extending loads plus an unsigned compare; everything else goes
   through the byte-wise helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: delegate to the helper, which also sets CC. */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    /* Zero-extended operands, so the unsigned 64-bit compare is exact.  */
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1891
/* COMPARE LOGICAL LONG EXTENDED via helper; r1/r3 name the register
   pairs, the helper returns the CC value.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
1903
/* COMPARE LOGICAL UNDER MASK via helper: in1 (truncated to 32 bits) is
   compared against memory bytes selected by the m3 mask; the helper
   returns the CC value.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1916
/* COMPARE LOGICAL STRING via helper; regs[0] carries the terminator.
   The helper returns the updated first address in in1 and the second
   via return_low128, plus the CC value.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1925
1926 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1927 {
1928 TCGv_i64 t = tcg_temp_new_i64();
1929 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1930 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1931 tcg_gen_or_i64(o->out, o->out, t);
1932 tcg_temp_free_i64(t);
1933 return NO_EXIT;
1934 }
1935
/* COMPARE AND SWAP (32/64-bit per insn->data).  Non-atomic TCG
   emission: load, compare, conditionally-selected store, then CC
   write-back -- the statement order is load before compare before
   store before CC, and must stay that way.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1985
/* COMPARE DOUBLE AND SWAP (128-bit, non-atomic TCG emission).  Loads
   both doublewords, folds the two compares into one flag with XOR/OR,
   conditionally stores, and only then commits the register results and
   CC -- so a faulting store leaves architectural state untouched.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* On equality store the new value, otherwise re-store the old.  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions.  */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
2037
2038 #ifndef CONFIG_USER_ONLY
/* Privileged: COMPARE AND SWAP AND PURGE via helper (system mode
   only); the helper returns the CC value.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
2048 #endif
2049
/* CONVERT TO DECIMAL: truncate in1 to 32 bits, convert via helper,
   and store the 8-byte result at the address in in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2061
/* COMPARE AND TRAP: branch over the trap when the *inverted* m3
   condition holds, i.e. trap exactly when the condition is met.
   insn->data selects the unsigned comparison variant.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
2080
2081 #ifndef CONFIG_USER_ONLY
/* Privileged DIAGNOSE via helper.  PSW address and CC are synced to
   the env first since the helper may inspect or report them.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2099 #endif
2100
/* Signed 32-bit divide via helper: remainder in out2 (helper return),
   quotient fetched with return_low128 into out.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2107
/* Unsigned 32-bit divide via helper: remainder in out2, quotient via
   return_low128 into out.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2114
/* Signed 64-bit divide via helper: remainder in out2, quotient via
   return_low128 into out.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2121
/* Unsigned 128/64 divide via helper: the 128-bit dividend is passed as
   the out/out2 pair; remainder in out2, quotient via return_low128.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2128
/* Emit the deb helper: out = helper_deb(env, in1, in2).  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2134
/* Emit the ddb helper: out = helper_ddb(env, in1, in2).  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2140
/* 128-bit float divide via helper; low result half via return_low128. */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2147
/* EXTRACT ACCESS register r2 into out (zero-extended 32-bit load from
   the env's access-register array).  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
2154
/* EXTRACT CACHE ATTRIBUTE: we model no cache topology, so always
   return -1 ("no information").  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
2161
/* EXTRACT FPC: load the 32-bit FP-control word from env into out.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2167
/* EXTRACT PSW: high half of psw_mask into R1, low half into R2 (when
   R2 != 0).  The writes are emitted immediately, in this order, so the
   r1 == r2 case has the architecturally defined result.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
2185
/* EXECUTE, implemented entirely in a helper.  PSW address and CC are
   synced first; the helper receives the address of the following
   instruction so it can resume there.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
2210
/* Round-to-integer (float form) via helper; m3 is the rounding mode.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2218
/* Round-to-integer (double form) via helper; m3 is the rounding mode. */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2226
/* Round-to-integer (128-bit form) via helper; in1/in2 are the source
   halves, the low result half comes back via return_low128.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2235
/* FIND LEFTMOST ONE: out = (in2 ? clz(in2) : 64), out2 = in2 with the
   found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2255
2256 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2257 {
2258 int m3 = get_field(s->fields, m3);
2259 int pos, len, base = s->insn->data;
2260 TCGv_i64 tmp = tcg_temp_new_i64();
2261 uint64_t ccm;
2262
2263 switch (m3) {
2264 case 0xf:
2265 /* Effectively a 32-bit load. */
2266 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2267 len = 32;
2268 goto one_insert;
2269
2270 case 0xc:
2271 case 0x6:
2272 case 0x3:
2273 /* Effectively a 16-bit load. */
2274 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2275 len = 16;
2276 goto one_insert;
2277
2278 case 0x8:
2279 case 0x4:
2280 case 0x2:
2281 case 0x1:
2282 /* Effectively an 8-bit load. */
2283 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2284 len = 8;
2285 goto one_insert;
2286
2287 one_insert:
2288 pos = base + ctz32(m3) * 8;
2289 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2290 ccm = ((1ull << len) - 1) << pos;
2291 break;
2292
2293 default:
2294 /* This is going to be a sequence of loads and inserts. */
2295 pos = base + 32 - 8;
2296 ccm = 0;
2297 while (m3) {
2298 if (m3 & 0x8) {
2299 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2300 tcg_gen_addi_i64(o->in2, o->in2, 1);
2301 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2302 ccm |= 0xff << pos;
2303 }
2304 m3 = (m3 << 1) & 0xf;
2305 pos -= 8;
2306 }
2307 break;
2308 }
2309
2310 tcg_gen_movi_i64(tmp, ccm);
2311 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2312 tcg_temp_free_i64(tmp);
2313 return NO_EXIT;
2314 }
2315
/* INSERT IMMEDIATE: deposit IN2 into IN1 at the bit position and width
   encoded in the insn data (low byte = shift, high bits = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2323
/* INSERT PROGRAM MASK: build bits 32-39 of OUT from the current CC
   (bits 28-29) and the program mask out of psw_mask (bits 24-27).  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    /* Materialize the CC into cc_op before reading it below.  */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program mask from psw_mask into position 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Merge the condition code into position 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2342
2343 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged): all work done in the helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2350
/* INSERT STORAGE KEY EXTENDED (privileged): fetch the storage key for
   the address in IN2 via the helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2357 #endif
2358
/* BFP format-conversion loads (LOAD LENGTHENED / LOAD ROUNDED).  Each is
   a thin wrapper around the corresponding softfloat helper; the 128-bit
   results come back as OUT:OUT2 via return_low128.  */

/* 32-bit -> 64-bit.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* 64-bit -> 32-bit (rounded).  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* 128-bit -> 64-bit (rounded); input in IN1:IN2.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit -> 32-bit (rounded); input in IN1:IN2.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 64-bit -> 128-bit.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* 32-bit -> 128-bit.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2396
/* LOAD LOGICAL THIRTY ONE BITS: keep only the low 31 bits of IN2.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2402
/* Basic memory loads of each width, signed and unsigned, from the
   address in IN2.  Used as generic "in from memory" ops by the insn
   table.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2444
/* LOAD AND TRAP family: store/compute the result, then trap iff the
   loaded value is zero.  The result is committed before the trap is
   taken, matching the architected behavior noted below.  */

static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* As op_lat, but targets the high 32 bits of R1.  */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* 31-bit variant: mask to 31 bits before the zero test.  */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
2499
/* LOAD ON CONDITION: OUT = (M3 condition holds) ? IN2 : IN1, implemented
   branch-free with movcond.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        /* 64-bit comparison operands: select directly.  */
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: compute the condition as a 0/1 value,
           widen it, and select against zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2529
2530 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged): load control registers R1..R3 from
   the address in IN2 via the helper.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2542
/* LOAD CONTROL (64-bit, privileged): as op_lctl but 8-byte entries.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS (privileged): translate the virtual address in IN2;
   the helper sets the CC, which we latch with set_cc_static.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2562
/* LOAD PSW (short, privileged): read an 8-byte short-format PSW from
   memory and install it.  Control does not return to this TB.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2582
/* LOAD PSW EXTENDED (privileged): read a 16-byte PSW (mask, address)
   and install it.  Control does not return to this TB.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2600 #endif
2601
/* LOAD ACCESS MULTIPLE: load access registers R1..R3 from the address
   in IN2 via the helper.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2612
/* LOAD MULTIPLE (32-bit): load registers R1..R3 (wrapping mod 16) from
   consecutive words.  The first and last words are loaded before any
   register is written, so a page fault mid-range leaves all registers
   unmodified.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
2658
/* LOAD MULTIPLE HIGH: identical structure to op_lm32, but the loaded
   words go into the high 32 bits of each register.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
2704
/* LOAD MULTIPLE (64-bit): as op_lm32 with 8-byte elements, writing the
   full registers directly.  First/last are loaded before any register
   is clobbered so a mid-range fault is restartable.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
2745
2746 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS, 32- and 64-bit (privileged): bypass address
   translation via the helpers.  */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2762 #endif
2763
/* Generic move: steal IN2 as the output value (transferring ownership
   of the temp and its "global" flag) instead of copying it.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2772
/* MOVE (MVCDK-style with access-register update): move IN2 to OUT as in
   op_mov2, and set access register 1 according to the current address
   space control in the TB flags.  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    /* Transfer ownership of IN2 to OUT, as in op_mov2.  */
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    /* All four values of the 2-bit ASC field are covered below.  */
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        if (b2) {
            /* Copy the access register selected by the base field.  */
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
2807
/* 128-bit move: steal the IN1:IN2 pair as OUT:OUT2, transferring temp
   ownership as in op_mov2.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2819
/* MOVE (character): copy L1+1 bytes from IN2 to ADDR1 via the helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
2828
/* MOVE LONG: helper operates on the R1/R2 register pairs and sets CC.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
2840
/* MOVE LONG EXTENDED: as MVCL but with the R3 pad byte/register and the
   destination pair R1; sets CC via the helper.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2852
2853 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY / MOVE TO SECONDARY (privileged): cross-address-space
   moves; the length register is selected by the L1 field.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2873 #endif
2874
/* MOVE PAGE: helper uses R0 for the function-control bits; sets CC.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2882
/* MOVE STRING: copy until the ending byte in R0; the helper returns the
   updated R1 address directly and the updated R2 via return_low128.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2891
/* MULTIPLY: 64-bit low product, and the full 128-bit unsigned product
   into OUT:OUT2 for the 64x64 variant.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}
2903
/* BFP multiplies: thin wrappers over the softfloat helpers for 32-bit,
   64-bit, and 128-bit formats (128-bit values live in OUT:OUT2).  */

static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 32x32 -> 64-bit product.  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* 64x64 -> 128-bit product.  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2935
/* BFP fused multiply-add / multiply-subtract.  The 32-bit forms load a
   temporary copy of FPR R3 (and must free it); the 64-bit forms pass
   the global freg directly.  */

static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2965
/* LOAD NEGATIVE: OUT = -|IN2|, i.e. negate IN2 iff it is >= 0.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
2977
/* LOAD NEGATIVE (BFP): force the sign bit on, for 32-, 64-, and 128-bit
   formats.  Only the sign-containing word is modified; for the 128-bit
   form the low half passes through unchanged.  */

static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2996
/* AND (character): byte-wise AND of L1+1 bytes in storage; CC set by
   the helper.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3006
/* LOAD COMPLEMENT: integer two's-complement negate, and BFP sign-bit
   flips for the 32-, 64-, and 128-bit float formats.  */

static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* Only the high half carries the sign; the low half is copied as-is.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
3031
/* OR (character): byte-wise OR of L1+1 bytes in storage; CC set by the
   helper.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3041
/* OR: plain register OR, and the immediate form that ORs into one field
   of the register.  */

static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR IMMEDIATE: the insn data encodes the target field's shift (low
   byte) and width (high bits); IN2 holds the unshifted immediate.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3063
/* POPULATION COUNT: per-byte bit counts computed by the helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
3069
3070 #ifndef CONFIG_USER_ONLY
/* PURGE TLB (privileged).  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
3077 #endif
3078
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate R2 by
   I5 and insert the bit range I3..I4 into R1, optionally zeroing the
   non-selected bits (I4 bit 0x80).  The high/low variants restrict the
   operation to one 32-bit half via PMASK.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        /* I3 > I4: the selected range wraps around the word.  */
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            /* risbhg: translate half-register offsets to full-register.  */
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        /* No deposit possible; fall back to the mask/or sequence below.  */
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
3161
/* ROTATE THEN {AND,OR,XOR} SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate R2
   by I5, combine the bit range I3..I4 into R1 with the operation chosen
   by op2, and set the CC from the affected bits.  I3 bit 0x80 makes
   this a test-only form (result discarded).  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        /* I3 > I4: the selected range wraps around the word.  */
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside MASK are made neutral for each operation
       (1 for AND, 0 for OR/XOR) so R1 keeps them unchanged.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3214
/* Byte-swap ops (LOAD REVERSED) at 16-, 32-, and 64-bit widths.  */

static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
3232
/* ROTATE LEFT SINGLE LOGICAL, 32- and 64-bit.  The 32-bit form rotates
   in i32 temps and zero-extends the result.  */

static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3253
3254 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED (privileged): CC set by the helper.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL FAST (privileged).  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
3270 #endif
3271
/* SET ADDRESSING MODE (SAM24/SAM31/SAM64): update the AM bits in the
   PSW mask and truncate the continuation PC to the new mode's width.
   The insn data selects the mode (0=24-bit, 1=31-bit, else 64-bit).  */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    /* Write the new mode into PSW mask bits 31-32.  */
    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}
3306
/* SET ACCESS: store the low 32 bits of IN2 into access register R1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
3313
/* BFP subtract and square root, per format; 128-bit results come back
   as OUT:OUT2 via return_low128.  */

static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3351
3352 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (privileged): SCLP call; CC set by the helper.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR (privileged).  Note: the helper writes cc_op but the
   CC is not latched with set_cc_static here.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3371 #endif
3372
/* STORE ON CONDITION: store R1 (32- or 64-bit per insn data) at B2+D2
   only if the M3 condition holds; implemented by branching around the
   store when it does not.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
3406
/* SHIFT LEFT SINGLE (arithmetic): the insn data gives the sign-bit
   position (31 or 63), which also selects the CC op.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
3420
/* Logical and arithmetic shifts; shift amount comes pre-masked in IN2.  */

static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3438
3439 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3440 {
3441 gen_helper_sfpc(cpu_env, o->in2);
3442 return NO_EXIT;
3443 }
3444
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    /* SET FPC AND SIGNAL: helper also raises pending simulated IEEE traps.  */
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3450
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    /* SET ROUNDING MODE: SRNM / SRNMB / SRNMT share this generator.
       Each variant deposits a differently-sized field of the effective
       address into the FPC rounding-mode bits.  */
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    /* Select the FPC bit-field (position and width) by opcode.  */
    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        /* Base register 0 means the displacement alone is the value.  */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3490
3491 #ifndef CONFIG_USER_ONLY
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    /* SET PSW KEY FROM ADDRESS: bits 24-27 of in2 become the PSW key.  */
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}
3499
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    /* SET STORAGE KEY EXTENDED: done in the helper.  */
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
3506
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    /* SET SYSTEM MASK: replace the top byte (bits 0-7) of the PSW mask.  */
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3513
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    /* STORE CPU ADDRESS.  */
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3523
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    /* STORE CLOCK: helper reads the TOD clock into out.  */
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3531
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    /* STORE CLOCK EXTENDED: store a 16-byte value at in2.  */
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3553
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    /* SET CLOCK COMPARATOR.  */
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}
3560
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    /* STORE CLOCK COMPARATOR.  */
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
3567
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    /* STORE CONTROL (64-bit): store control registers r1..r3 at in2.  */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3579
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    /* STORE CONTROL (32-bit): store control registers r1..r3 at in2.  */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3591
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    /* STORE CPU ID: cpu_num in the low half, machine_type in the high.  */
    TCGv_i64 t1 = tcg_temp_new_i64();

    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}
3604
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    /* SET CPU TIMER.  */
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
3611
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    /* STORE FACILITY LIST: write a fixed facility word to the low-core
       STFL field at absolute address 200.  */
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}
3625
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    /* STORE CPU TIMER.  */
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
3632
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    /* STORE SYSTEM INFORMATION: helper consumes r0/r1 and sets CC.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
3641
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    /* SET PREFIX.  */
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
3648
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    /* CANCEL SUBCHANNEL: subchannel id is in r1 per the I/O convention.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
3657
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    /* CLEAR SUBCHANNEL.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
3666
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    /* HALT SUBCHANNEL.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
3675
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    /* MODIFY SUBCHANNEL: in2 is the SCHIB address.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3684
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    /* RESET CHANNEL PATH.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
3693
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    /* RESUME SUBCHANNEL.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
3702
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    /* START SUBCHANNEL: in2 is the ORB address.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3711
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    /* STORE SUBCHANNEL: in2 is the SCHIB destination address.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3720
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    /* TEST SUBCHANNEL: in2 is the IRB destination address.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3729
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    /* CHANNEL SUBSYSTEM CALL: in2 is the command block address.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3738
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    /* STORE PREFIX: mask to the architected prefix address bits.  */
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
3746
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    /* STNSM (op 0xac) and STOSM share this generator: store the current
       system mask byte, then AND or OR the immediate into it.  */
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: clear the mask bits not set in i2.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: set the mask bits set in i2.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3770
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    /* STORE USING REAL ADDRESS (32-bit).  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3778
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    /* STORE USING REAL ADDRESS (64-bit).  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3786 #endif
3787
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    /* Store the low byte of in1 at address in2.  */
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3793
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    /* Store the low halfword of in1 at address in2.  */
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3799
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    /* Store the low word of in1 at address in2.  */
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3805
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    /* Store all of in1 at address in2.  */
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3811
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    /* STORE ACCESS MULTIPLE: store access registers r1..r3 at in2.  */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3822
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    /* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by
       mask m3 to successive bytes at in2.  insn->data gives the bit
       offset of the relevant word within the register (STCM/STCMY vs
       STCMH).  Contiguous masks become a single wider store.  */
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte within in1.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            /* Advance to the next mask bit / next lower byte.  */
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3871
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    /* STORE MULTIPLE: store registers r1..r3 (wrapping mod 16) at in2.
       insn->data is the element size (4 or 8 bytes).  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3895
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    /* STORE MULTIPLE HIGH: store the high words of r1..r3 (wrapping
       mod 16) at successive words starting at in2.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* NOTE(review): shifting LEFT by 32 puts the high word in the
           high half; tcg_gen_qemu_st32 stores the LOW half of the temp.
           Presumably this relies on st32 truncation semantics matching
           a right shift elsewhere -- confirm against tcg/README.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
3919
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    /* SEARCH STRING: helper returns the match address in in1 and the
       updated scan address via the low-128 mechanism.  */
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3928
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    /* SUBTRACT: CC is produced by the cout hook, not here.  */
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3934
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    /* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where borrow is
       derived from the current condition code.  */
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the 0/1 result to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
3960
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    /* SUPERVISOR CALL: record the SVC code and instruction length in
       env, then raise the SVC exception.  Ends the TB.  */
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3979
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    /* TEST DATA CLASS (short BFP).  */
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3986
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    /* TEST DATA CLASS (long BFP).  */
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3993
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    /* TEST DATA CLASS (extended BFP): operand is the out/out2 pair.  */
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
4000
4001 #ifndef CONFIG_USER_ONLY
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    /* TEST PROTECTION.  */
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
4009 #endif
4010
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    /* TRANSLATE: translate l1+1 bytes at addr1 through the table at in2.  */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
4020
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    /* TRANSLATE EXTENDED: updates the address/length pair in out/out2.  */
    potential_page_fault(s);
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}
4029
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    /* TRANSLATE AND TEST.  */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
4039
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    /* UNPACK: no CC change.  */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
4048
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    /* EXCLUSIVE OR (character): XOR l1+1 bytes at (b2,d2) into (b1,d1).
       The common XC reg,reg idiom (identical operands) is a memset-to-0
       and is inlined; everything else goes through the helper.  */
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l is a length-minus-one field; emit 8/4/2/1-byte zero stores.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* XOR of equal operands always yields zero, hence CC 0.  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
4102
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    /* EXCLUSIVE OR: CC comes from the cout hook.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
4108
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    /* XOR IMMEDIATE (XIHF/XILF family): insn->data packs the field
       width (high byte) and bit offset (low byte) of the immediate.  */
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 must be a local temp since we shift it in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
4124
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    /* Produce a constant-zero output operand.  */
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}
4130
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    /* Produce a constant-zero output pair; out2 aliases out, so mark it
       global to stop the writeback code from freeing it twice.  */
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
4138
4139 /* ====================================================================== */
4140 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4141 the original inputs), update the various cc data structures in order to
4142 be able to compute the new condition code. */
4143
/* CC from the absolute value of a 32-bit result.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}
4148
/* CC from the absolute value of a 64-bit result.  */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}
4153
/* CC for 32-bit signed addition.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}
4158
/* CC for 64-bit signed addition.  */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}
4163
/* CC for 32-bit unsigned (logical) addition.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}
4168
/* CC for 64-bit unsigned (logical) addition.  */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}
4173
/* CC for 32-bit add-with-carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}
4178
/* CC for 64-bit add-with-carry.  */
static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}
4183
/* CC for 32-bit signed compare.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}
4188
/* CC for 64-bit signed compare.  */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}
4193
/* CC for 32-bit unsigned compare.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}
4198
/* CC for 64-bit unsigned compare.  */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}
4203
/* CC from a short BFP result (zero / negative / positive / NaN).  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}
4208
/* CC from a long BFP result.  */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}
4213
/* CC from an extended BFP result held in the out/out2 pair.  */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}
4218
/* CC from a 32-bit negated-absolute result.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}
4223
/* CC from a 64-bit negated-absolute result.  */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}
4228
/* CC from a 32-bit complement (negation) result.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}
4233
/* CC from a 64-bit complement (negation) result.  */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}
4238
/* CC = zero/nonzero of the low 32 bits of the result.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Truncate before testing so high garbage doesn't affect the CC.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}
4244
/* CC = zero/nonzero of the full 64-bit result.  */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}
4249
/* CC from the sign of a 32-bit result (compare against zero).  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}
4254
/* CC from the sign of a 64-bit result (compare against zero).  */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}
4259
/* CC for 32-bit signed subtraction.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}
4264
/* CC for 64-bit signed subtraction.  */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}
4269
/* CC for 32-bit unsigned (logical) subtraction.  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}
4274
/* CC for 64-bit unsigned (logical) subtraction.  */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}
4279
/* CC for 32-bit subtract-with-borrow.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}
4284
/* CC for 64-bit subtract-with-borrow.  */
static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}
4289
/* CC for TEST UNDER MASK (32-bit).  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}
4294
/* CC for TEST UNDER MASK (64-bit).  */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
4299
4300 /* ====================================================================== */
4301 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4302 with the TCG register to which we will write. Used in combination with
4303 the "wout" generators, in some cases we need a new temporary, and in
4304 some cases we can write to a TCG global. */
4305
/* Allocate a fresh temporary as the output operand.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0
4311
/* Allocate a fresh temporary pair as the output operands.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0
4318
/* Use GPR r1 directly as the output (a TCG global; do not free).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0
4325
/* Use the even/odd GPR pair r1/r1+1 directly as the output pair.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even
4334
/* Use FPR r1 directly as the output (a TCG global; do not free).  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0
4341
/* Use the 128-bit FPR pair r1/r1+2 directly as the output pair.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
4350
4351 /* ====================================================================== */
4352 /* The "Write OUTput" generators. These generally perform some non-trivial
4353 copy of data to TCG globals, or to main memory. The trivial cases are
4354 generally handled by having a "prep" generator install the TCG global
4355 as the destination of the operation. */
4356
/* Write the full 64-bit result to GPR r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0
4362
/* Write only the low 8 bits of the result into GPR r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0
4369
/* Write only the low 16 bits of the result into GPR r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0
4376
/* Write the low 32 bits of the result into GPR r1 (high half kept).  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0
4382
/* Write the result into the high 32 bits of GPR r1 (low half kept).  */
static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0
4388
/* Write out/out2 to the low halves of the even/odd pair r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even
4396
/* Split a doubleword result across the even/odd pair: low word to
   r1+1, high word to r1.  Note o->out is clobbered by the shift.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
4405
/* Write a short (32-bit) FP result to FPR r1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0
4411
/* Write a long (64-bit) FP result to FPR r1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
4417
4418 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4419 {
4420 int f1 = get_field(s->fields, r1);
4421 store_freg(f1, o->out);
4422 store_freg(f1 + 2, o->out2);
4423 }
4424 #define SPEC_wout_x1 SPEC_r1_f128
4425
/* Conditionally write the low 32 bits to r1, skipped when r1 == r2
   (used by insns that are no-ops for identical registers).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0
4433
/* Conditionally write a short FP result to f1, skipped when r1 == r2.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0
4441
/* Store the low byte of the result at addr1.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0
4447
/* Store the low halfword of the result at addr1.  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0
4453
/* Store the low word of the result at addr1.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0
4459
/* Store the full doubleword result at addr1.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0
4465
/* Store the low word of the result at the in2 address.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0
4471
/* Store a 32-bit result at addr1 and return the old memory value
   (held in in2) to r1 -- used for load-and-op style insns.  */
static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_32_r1_atomic 0
4479
/* 64-bit variant of wout_m2_32_r1_atomic.  */
static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX release reservation */
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_m2_64_r1_atomic 0
4487
4488 /* ====================================================================== */
4489 /* The "INput 1" generators. These load the first operand to an insn. */
4490
/* Load GPR r1 into a fresh temporary as operand 1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0
4496
/* Use GPR r1 directly (a TCG global; "_o" = original, do not free).  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0
4503
/* Sign-extend the low 32 bits of GPR r1 into operand 1.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0
4510
/* Zero-extend the low 32 bits of GPR r1 into operand 1.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0
4517
/* Operand 1 = high 32 bits of GPR r1 (shifted right into the low half).  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0
4524
/* Load the odd register of the r1 pair (r1+1) as operand 1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even
4530
/* Sign-extend the low 32 bits of GPR r1+1 into operand 1.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even
4537
/* Zero-extend the low 32 bits of GPR r1+1 into operand 1.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even
4544
/* Operand 1 = the r1/r1+1 pair viewed as one doubleword:
   high word from r1, low word from r1+1.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even
4552
/* Load GPR r2 into a fresh temporary as operand 1.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0
4558
/* Operand 1 = high 32 bits of GPR r2.  */
static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0
4565
4566 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4567 {
4568 o->in1 = load_reg(get_field(f, r3));
4569 }
4570 #define SPEC_in1_r3 0
4571
4572 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4573 {
4574 o->in1 = regs[get_field(f, r3)];
4575 o->g_in1 = true;
4576 }
4577 #define SPEC_in1_r3_o 0
4578
4579 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4580 {
4581 o->in1 = tcg_temp_new_i64();
4582 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4583 }
4584 #define SPEC_in1_r3_32s 0
4585
4586 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4587 {
4588 o->in1 = tcg_temp_new_i64();
4589 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4590 }
4591 #define SPEC_in1_r3_32u 0
4592
4593 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4594 {
4595 int r3 = get_field(f, r3);
4596 o->in1 = tcg_temp_new_i64();
4597 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4598 }
4599 #define SPEC_in1_r3_D32 SPEC_r3_even
4600
4601 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4602 {
4603 o->in1 = load_freg32_i64(get_field(f, r1));
4604 }
4605 #define SPEC_in1_e1 0
4606
4607 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4608 {
4609 o->in1 = fregs[get_field(f, r1)];
4610 o->g_in1 = true;
4611 }
4612 #define SPEC_in1_f1_o 0
4613
4614 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4615 {
4616 int r1 = get_field(f, r1);
4617 o->out = fregs[r1];
4618 o->out2 = fregs[r1 + 2];
4619 o->g_out = o->g_out2 = true;
4620 }
4621 #define SPEC_in1_x1_o SPEC_r1_f128
4622
4623 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4624 {
4625 o->in1 = fregs[get_field(f, r3)];
4626 o->g_in1 = true;
4627 }
4628 #define SPEC_in1_f3_o 0
4629
/* Compute the first-operand effective address (b1 + d1) into addr1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Despite being an "in1" generator, this computes the address from the
   second-operand fields (x2/b2/d2) and stores it in addr1; used e.g.
   by the atomic in2_m2_* loaders below, which both load from and later
   store back to this address.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* The in1_m1_* generators compute the first-operand address into addr1
   and load a value of the named width/signedness from it into in1.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4690
4691 /* ====================================================================== */
4692 /* The "INput 2" generators. These load the second operand to an insn. */
4693
/* Use the global TCGv for r1 directly as in2; g_in2 prevents freeing.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0
4700
4701 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4702 {
4703 o->in2 = tcg_temp_new_i64();
4704 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4705 }
4706 #define SPEC_in2_r1_16u 0
4707
/* Low 32 bits of r1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Concatenate the even/odd pair r1:r1+1, even register in the high half.  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* Use the global TCGv for r2 directly; g_in2 prevents freeing.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when the field is non-zero; otherwise in2 is left unset
   (TCGV_UNUSED), which the consuming op must be prepared for.  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0
4744
4745 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4746 {
4747 o->in2 = tcg_temp_new_i64();
4748 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4749 }
4750 #define SPEC_in2_r2_8s 0
4751
/* Low 8 bits of r2, zero-extended.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

/* Low 16 bits of r2, sign-extended.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

/* Low 16 bits of r2, zero-extended.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* High 32 bits of r3, shifted down to the low half.  */
static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

/* Low 32 bits of r2, sign-extended.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

/* Low 32 bits of r2, zero-extended.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* High 32 bits of r2, shifted down to the low half.  */
static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
4806
/* Short (32-bit) FP operand from register f2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Use the global TCGv for FP register f2 directly; not freed (g_in2).  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit FP operand: the x2 register pair fills both in1 and in2,
   aliasing the globals directly.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Register used as an address: r2 with no base/index/displacement.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Standard effective address from x2 + b2 + d2 (x2 optional).  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: i2 is a signed halfword offset from this insn.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift amounts: the second-operand address masked to 5 or 6 bits.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
4859
/* The in2_m2_* generators compute the second-operand address via
   in2_a2 and then load through it, reusing the same temporary: the
   address in in2 is overwritten with the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* The in2_mri2_* generators load through a PC-relative address
   (in2_ri2), again reusing the address temporary for the value.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Like in2_m2_32s, but the address is also preserved in addr1 (via
   in1_la2) so the op can store back to it afterwards.  */
static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_32s_atomic 0

static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_64_atomic 0
4947
/* Immediate operands: i2 as a constant, either as-is or truncated to
   the named unsigned width via the cast.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0
4971
4972 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4973 {
4974 uint64_t i2 = (uint16_t)get_field(f, i2);
4975 o->in2 = tcg_const_i64(i2 << s->insn->data);
4976 }
4977 #define SPEC_in2_i2_16u_shl 0
4978
/* 32-bit unsigned immediate, shifted left by the per-insn data amount
   (widened to 64 bits before the shift).  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw, left-aligned instruction image itself as the operand.  */
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
4993
4994 /* ====================================================================== */
4995
4996 /* Find opc within the table of insns. This is formulated as a switch
4997 statement so that (1) we get compile-time notice of cut-paste errors
4998 for duplicated opcodes, and (2) the compiler generates the binary
4999 search tree, rather than us having to post-process the table. */
5000
/* C() is for insns without a per-insn DATA value; expand it as D()
   with data 0.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: an enum value per insn name,
   giving each insn a stable index into insn_info[].  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
/* Second expansion: a DisasInsn descriptor per insn, wiring up the
   helper callbacks and specification-check bits by token pasting.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
5042
#undef D
/* Third expansion: a switch case per opcode, mapping the 16-bit
   combined opcode back to its insn_info[] entry.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a combined (op << 8 | op2) opcode to its descriptor, or NULL if
   the opcode is not implemented.  The compiler turns the generated
   switch into a binary search (see the comment above).  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
5058
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* Size 0 marks an absent field in this format; nothing to do.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Classic branch-free sign extension: xor with the sign bit,
           then subtract it.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The raw value has dh (high 8 bits, signed) in the low byte
           and dl (low 12 bits) above it; reassemble dh:dl.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
5098
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes are enough to determine the length: the top
       opcode byte encodes the instruction length (2, 4 or 6 bytes).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Re-read the full insn and left-align it in the 64-bit word, as
       extract_field expects.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
5188
/* Translate a single instruction at s->pc: decode it, run its helper
   pipeline (in1/in2/prep/op/wout/cout), clean up the operand temps,
   and advance s->pc.  Returns the exit status requested by the op.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    /* PER (Program Event Recording): report the instruction fetch
       before emitting the insn itself.  */
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions: register operands that
       the architecture requires to be even-numbered pairs or valid
       128-bit FP register pairs.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each phase is optional; the order
       (inputs, prep, op, writeback, cc) is fixed.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       operands that alias global TCGvs and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc. */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
5321
/* Translate a whole TranslationBlock starting at tb->pc.  When
   SEARCH_PC is true, also record per-insn pc/cc_op/icount metadata in
   tcg_ctx.gen_opc_* so the state can be restored after a fault.  */
static inline void gen_intermediate_code_internal(S390CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUS390XState *env = &cpu->env;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start(tb);

    do {
        if (search_pc) {
            /* Record the op-buffer position of this insn, padding any
               gap since the previous one with zero markers.  */
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* If a breakpoint sits at this pc, stop before translating it
           and arrange for a debug exception at TB exit.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return. */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    if (search_pc) {
        /* Zero-pad the metadata arrays out to the final op count.  */
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
5454
/* Public entry point: translate TB without pc-search metadata.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}

/* Public entry point: translate TB recording pc-search metadata,
   used when restoring CPU state at a faulting instruction.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}
5464
/* Restore CPU state from the pc-search metadata at op index PC_POS:
   reinstate the guest PSW address and, if the recorded cc state was a
   concrete computation, the pending cc operation.  */
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
    cc_op = gen_opc_cc_op[pc_pos];
    /* DYNAMIC/STATIC mean env->cc_op is already authoritative.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}