]> git.proxmox.com Git - mirror_qemu.git/blob - target/s390x/tcg/translate.c
exec/translator: Pass the locked filepointer to disas_log hook
[mirror_qemu.git] / target / s390x / tcg / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44
45 #include "exec/translator.h"
46 #include "exec/log.h"
47 #include "qemu/atomic128.h"
48
49
50 /* Information that (most) every instruction needs to manipulate. */
51 typedef struct DisasContext DisasContext;
52 typedef struct DisasInsn DisasInsn;
53 typedef struct DisasFields DisasFields;
54
55 /*
56 * Define a structure to hold the decoded fields. We'll store each inside
57 * an array indexed by an enum. In order to conserve memory, we'll arrange
58 * for fields that do not exist at the same time to overlap, thus the "C"
59 * for compact. For checking purposes there is an "O" for original index
60 * as well that will be applied to availability bitmaps.
61 */
62
/*
 * "Original" field indices.  Each value is used as a bit number in the
 * presentO availability bitmap (see have_field1).
 */
enum DisasFieldIndexO {
    FLD_O_r1,           /* register designators */
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,           /* mask fields */
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,           /* base registers */
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,           /* displacements */
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,           /* index register */
    FLD_O_l1,           /* length fields */
    FLD_O_l2,
    FLD_O_i1,           /* immediates */
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,           /* vector register designators */
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
91
/*
 * "Compact" field indices.  Fields that never occur in the same
 * instruction format share a slot in DisasFields.c[], keeping the
 * array down to NUM_C_FIELD entries.
 */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
128
/* Decoded operand fields of one instruction. */
struct DisasFields {
    uint64_t raw_insn;          /* the raw instruction bits */
    unsigned op:8;              /* primary opcode byte */
    unsigned op2:8;             /* secondary opcode byte, where present */
    unsigned presentC:16;       /* bitmap over compact (C) field indices */
    unsigned int presentO;      /* bitmap over original (O) field indices */
    int c[NUM_C_FIELD];         /* extracted values, indexed by C index */
};
137
struct DisasContext {
    DisasContextBase base;      /* common translator state; must be first */
    const DisasInsn *insn;      /* decode-table entry for the current insn */
    TCGOp *insn_start;          /* the insn_start op of the current insn */
    DisasFields fields;         /* decoded operand fields */
    uint64_t ex_value;          /* presumably pending EXECUTE target value;
                                   TODO confirm against the decoder */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;              /* length in bytes of the current insn */
    enum cc_op cc_op;           /* cc computation tracked at translate time */
};
153
/*
 * Information carried about a condition to be evaluated.
 * is_64 selects the s64 vs s32 arm of the union.  g1/g2 mark the
 * corresponding operand as a global TCG value that must not be freed
 * by free_compare().
 */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
165
166 #ifdef DEBUG_INLINE_BRANCHES
167 static uint64_t inline_branch_hit[CC_OP_MAX];
168 static uint64_t inline_branch_miss[CC_OP_MAX];
169 #endif
170
171 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
172 {
173 TCGv_i64 tmp;
174
175 if (s->base.tb->flags & FLAG_MASK_32) {
176 if (s->base.tb->flags & FLAG_MASK_64) {
177 tcg_gen_movi_i64(out, pc);
178 return;
179 }
180 pc |= 0x80000000;
181 }
182 assert(!(s->base.tb->flags & FLAG_MASK_64));
183 tmp = tcg_const_i64(pc);
184 tcg_gen_deposit_i64(out, out, tmp, 0, 32);
185 tcg_temp_free_i64(tmp);
186 }
187
188 static TCGv_i64 psw_addr;
189 static TCGv_i64 psw_mask;
190 static TCGv_i64 gbea;
191
192 static TCGv_i32 cc_op;
193 static TCGv_i64 cc_src;
194 static TCGv_i64 cc_dst;
195 static TCGv_i64 cc_vr;
196
197 static char cpu_reg_names[16][4];
198 static TCGv_i64 regs[16];
199
/*
 * Create the TCG globals backing the translated CPU state: the PSW
 * address and mask, the breaking-event address (gbea), the condition
 * code machinery (operation plus up to three operands), and the
 * sixteen general registers.
 */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* General registers r0..r15; the names must outlive the globals. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
230
/* Byte offset of the full 16-byte vector register REG in CPUS390XState. */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
236
/*
 * Byte offset of element ENR, of element size ES, within vector
 * register REG.  ENR is assumed to be in range for ES; callers must
 * guarantee that.
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    /* Flip the in-doubleword position on little-endian hosts. */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
271
/* Offset of the 64-bit FP register REG, stored as element 0 of vreg REG. */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}
277
/* Offset of the 32-bit FP register REG, stored as element 0 of vreg REG. */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
283
284 static TCGv_i64 load_reg(int reg)
285 {
286 TCGv_i64 r = tcg_temp_new_i64();
287 tcg_gen_mov_i64(r, regs[reg]);
288 return r;
289 }
290
291 static TCGv_i64 load_freg(int reg)
292 {
293 TCGv_i64 r = tcg_temp_new_i64();
294
295 tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
296 return r;
297 }
298
299 static TCGv_i64 load_freg32_i64(int reg)
300 {
301 TCGv_i64 r = tcg_temp_new_i64();
302
303 tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
304 return r;
305 }
306
/* Store V into general register REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
311
/* Store V into 64-bit FP register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}
316
/* Store the low 32 bits of V into general register REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
322
/* Store the low 32 bits of V into the HIGH half of register REG. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
327
/* Store the low 32 bits of V into 32-bit FP register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}
332
/* Fetch the low 64 bits of a 128-bit helper result from env->retxl. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
337
/* Write the current instruction address back to psw.addr. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
343
344 static void per_branch(DisasContext *s, bool to_next)
345 {
346 #ifndef CONFIG_USER_ONLY
347 tcg_gen_movi_i64(gbea, s->base.pc_next);
348
349 if (s->base.tb->flags & FLAG_MASK_PER) {
350 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
351 gen_helper_per_branch(cpu_env, gbea, next_pc);
352 if (to_next) {
353 tcg_temp_free_i64(next_pc);
354 }
355 }
356 #endif
357 }
358
/*
 * Record a conditional branch for PER.  With PER enabled, skip over the
 * helper call when the condition does not hold; otherwise track the
 * breaking-event address with a movcond instead of a branch.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Branch around the helper when the condition is false. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        /*
         * NOTE(review): this selects the old gbea when COND holds and
         * pc_next otherwise -- confirm the intended movcond polarity.
         */
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
378
/* Record the current insn address as the breaking-event address. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
383
384 static void update_cc_op(DisasContext *s)
385 {
386 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
387 tcg_gen_movi_i32(cc_op, s->cc_op);
388 }
389 }
390
391 static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
392 uint64_t pc)
393 {
394 return (uint64_t)translator_lduw(env, &s->base, pc);
395 }
396
397 static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
398 uint64_t pc)
399 {
400 return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
401 }
402
/*
 * Select the MMU index for memory accesses of the current context:
 * real mode when DAT is off, otherwise per the address-space control
 * (ASC) bits of the PSW.
 */
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        /* FLAG_MASK_ASC admits no other encoding here. */
        tcg_abort();
        break;
    }
#endif
}
425
426 static void gen_exception(int excp)
427 {
428 TCGv_i32 tmp = tcg_const_i32(excp);
429 gen_helper_exception(cpu_env, tmp);
430 tcg_temp_free_i32(tmp);
431 }
432
/*
 * Raise a program exception with the given code: store the code and
 * the instruction length into the env, synchronize psw.addr and the
 * cc state, then trigger EXCP_PGM.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}
455
/* Raise an operation (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
460
461 static inline void gen_data_exception(uint8_t dxc)
462 {
463 TCGv_i32 tmp = tcg_const_i32(dxc);
464 gen_helper_data_exception(cpu_env, tmp);
465 tcg_temp_free_i32(tmp);
466 }
467
/* Raise a data exception with DXC 0xff. */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
473
474 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
475 int64_t imm)
476 {
477 tcg_gen_addi_i64(dst, src, imm);
478 if (!(s->base.tb->flags & FLAG_MASK_64)) {
479 if (s->base.tb->flags & FLAG_MASK_32) {
480 tcg_gen_andi_i64(dst, dst, 0x7fffffff);
481 } else {
482 tcg_gen_andi_i64(dst, dst, 0x00ffffff);
483 }
484 }
485 }
486
487 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
488 {
489 TCGv_i64 tmp = tcg_temp_new_i64();
490
491 /*
492 * Note that d2 is limited to 20 bits, signed. If we crop negative
493 * displacements early we create larger immedate addends.
494 */
495 if (b2 && x2) {
496 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
497 gen_addi_and_wrap_i64(s, tmp, tmp, d2);
498 } else if (b2) {
499 gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
500 } else if (x2) {
501 gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
502 } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
503 if (s->base.tb->flags & FLAG_MASK_32) {
504 tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
505 } else {
506 tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
507 }
508 } else {
509 tcg_gen_movi_i64(tmp, d2);
510 }
511
512 return tmp;
513 }
514
515 static inline bool live_cc_data(DisasContext *s)
516 {
517 return (s->cc_op != CC_OP_DYNAMIC
518 && s->cc_op != CC_OP_STATIC
519 && s->cc_op > 3);
520 }
521
/* Set the cc to the constant VAL; any live cc data becomes dead. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        /* Tell TCG the old cc inputs are no longer needed. */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
531
/* Record a one-operand cc computation: OP with operand DST. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        /* The unused cc inputs are dead for the new operation. */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
541
/* Record a two-operand cc computation: OP with operands SRC, DST. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        /* cc_vr is unused by the new operation. */
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
552
/* Record a three-operand cc computation: OP with operands SRC, DST, VR. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
561
/* Set the cc to zero/non-zero of VAL. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
566
/*
 * Mark the cc as already computed: the value lives in env->cc_op and
 * any data in cc_src/cc_dst/cc_vr becomes dead.
 */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
577
/*
 * Materialize the condition code into cc_op (the env copy), calling the
 * calc_cc helper with as many of cc_src/cc_dst/cc_vr as the current
 * operation needs; unused slots are passed a dummy zero.  Ends with the
 * cc in STATIC state.
 */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /* First pass: decide which constants the helper call will need. */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call: neither constant is needed. */
        break;
    }

    /* Second pass: emit the computation itself. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
672
673 static bool use_goto_tb(DisasContext *s, uint64_t dest)
674 {
675 if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
676 return false;
677 }
678 return translator_use_goto_tb(&s->base, dest);
679 }
680
/* Count a branch that could not be inlined (debug statistics only). */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
687
/* Count a branch that was inlined (debug statistics only). */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
694
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Entries come in identical
   pairs because the low mask bit (the CC=3 case) is ignored. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
707
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible, so the two low
   mask bits are ignored. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
720
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the mask maps
   onto a simple comparison of the tracked cc inputs, the comparison is
   inlined; otherwise the cc is materialized (gen_op_calc_cc) and the
   branch tests the env cc value against the mask. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask 15 is "always", mask 0 is "never": no comparison needed. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC. We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (src & dst) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is in the env; test it against the mask. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
983
984 static void free_compare(DisasCompare *c)
985 {
986 if (!c->g1) {
987 if (c->is_64) {
988 tcg_temp_free_i64(c->u.s64.a);
989 } else {
990 tcg_temp_free_i32(c->u.s32.a);
991 }
992 }
993 if (!c->g2) {
994 if (c->is_64) {
995 tcg_temp_free_i64(c->u.s64.b);
996 } else {
997 tcg_temp_free_i32(c->u.s32.b);
998 }
999 }
1000 }
1001
1002 /* ====================================================================== */
1003 /* Define the insn format enumeration. */
1004 #define F0(N) FMT_##N,
1005 #define F1(N, X1) F0(N)
1006 #define F2(N, X1, X2) F0(N)
1007 #define F3(N, X1, X2, X3) F0(N)
1008 #define F4(N, X1, X2, X3, X4) F0(N)
1009 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1010 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
1011
1012 typedef enum {
1013 #include "insn-format.def"
1014 } DisasFormat;
1015
1016 #undef F0
1017 #undef F1
1018 #undef F2
1019 #undef F3
1020 #undef F4
1021 #undef F5
1022 #undef F6
1023
1024 /* This is the way fields are to be accessed out of DisasFields. */
1025 #define have_field(S, F) have_field1((S), FLD_O_##F)
1026 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1027
/* Return true if original field C was decoded for this instruction. */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}
1032
/* Return the decoded value of field O, stored in compact slot C. */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
1039
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;             /* first bit of the field in the insn */
    unsigned int size:8;            /* width of the field in bits */
    unsigned int type:2;            /* extraction type (see the R/I/BD/V
                                       macros below) */
    unsigned int indexC:6;          /* compact storage slot */
    enum DisasFieldIndexO indexO:8; /* original field index */
} DisasField;
1048
/* All fields of one instruction format. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1052
1053 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1054 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1055 #define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
1056 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1057 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1058 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1059 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1060 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1061 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1062 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1063 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1064 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1065 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1066 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1067 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1068
1069 #define F0(N) { { } },
1070 #define F1(N, X1) { { X1 } },
1071 #define F2(N, X1, X2) { { X1, X2 } },
1072 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1073 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1074 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1075 #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
1076
1077 static const DisasFormatInfo format_info[] = {
1078 #include "insn-format.def"
1079 };
1080
1081 #undef F0
1082 #undef F1
1083 #undef F2
1084 #undef F3
1085 #undef F4
1086 #undef F5
1087 #undef F6
1088 #undef R
1089 #undef M
1090 #undef V
1091 #undef BD
1092 #undef BXD
1093 #undef BDL
1094 #undef BXDL
1095 #undef I
1096 #undef L
1097
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;   /* operand is a global, don't free */
    TCGv_i64 out, out2, in1, in2;       /* outputs and inputs */
    TCGv_i64 addr1;                     /* effective address, when used */
} DisasOps;
1106
1107 /* Instructions can place constraints on their operands, raising specification
1108 exceptions if they are violated. To make this easy to automate, each "in1",
1109 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1110 of the following, or 0. To make this easy to document, we'll put the
1111 SPEC_<name> defines next to <name>. */
1112
1113 #define SPEC_r1_even 1
1114 #define SPEC_r2_even 2
1115 #define SPEC_r3_even 4
1116 #define SPEC_r1_f128 8
1117 #define SPEC_r2_f128 16
1118
1119 /* Return values from translate_one, indicating the state of the TB. */
1120
1121 /* We are not using a goto_tb (for whatever reason), but have updated
1122 the PC (for whatever reason), so there's no need to do it again on
1123 exiting the TB. */
1124 #define DISAS_PC_UPDATED DISAS_TARGET_0
1125
1126 /* We have emitted one or more goto_tb. No fixup required. */
1127 #define DISAS_GOTO_TB DISAS_TARGET_1
1128
1129 /* We have updated the PC and CC values. */
1130 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1131
1132 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1133 updated the PC for the next instruction to be executed. */
1134 #define DISAS_PC_STALE DISAS_TARGET_3
1135
1136 /* We are exiting the TB to the main loop. */
1137 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
1138
1139
1140 /* Instruction flags */
1141 #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
1142 #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
1143 #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
1144 #define IF_BFP 0x0008 /* binary floating point instruction */
1145 #define IF_DFP 0x0010 /* decimal floating point instruction */
1146 #define IF_PRIV 0x0020 /* privileged instruction */
1147 #define IF_VEC 0x0040 /* vector instruction */
1148 #define IF_IO 0x0080 /* input/output instruction */
1149
/* One entry of the instruction decode table. */
struct DisasInsn {
    unsigned opc:16;        /* opcode */
    unsigned flags:16;      /* IF_* instruction flags */
    DisasFormat fmt:8;      /* instruction format */
    unsigned fac:8;         /* presumably the required facility --
                               TODO confirm against the insn-data table */
    unsigned spec:8;        /* SPEC_* operand constraints */

    const char *name;       /* mnemonic, for logging */

    /* Pre-process arguments before HELP_OP. */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself. */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* per-insn constant, e.g. selecting a variant */
};
1176
1177 /* ====================================================================== */
1178 /* Miscellaneous helpers, used by several operations. */
1179
/*
 * Emit an unconditional branch to DEST.  Chains directly to the target
 * TB when permitted; otherwise updates psw_addr and returns to the
 * main loop.  Also emits the PER branch/breaking-event tracking.
 */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        /* Branch to the next sequential instruction: just fall through. */
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        /* cc_op must be written back before leaving the TB. */
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1199
/*
 * Emit a conditional branch.  C holds the precomputed branch condition.
 * If IS_IMM, the target is pc_next + IMM * 2 (halfword-relative);
 * otherwise the target is the runtime value CDEST.  The compare C is
 * consumed (freed) on all paths.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next. */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch. */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken. */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond. This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb. Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            /* Select the new PC branchlessly via movcond. */
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit compare result so movcond can use it. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1330
1331 /* ====================================================================== */
1332 /* The operations. These perform the bulk of the work for any insn,
1333 usually after the operands have been loaded and output initialized. */
1334
/* Integer absolute value (LOAD POSITIVE family). */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* Float absolute value: clear the sign bit of a 32-bit value in in2. */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

/* Float absolute value: clear the sign bit of a 64-bit value. */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

/* 128-bit float abs: clear the sign in the high doubleword (in1),
   pass the low doubleword (in2) through unchanged. */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
1359
/* Signed/plain 64-bit addition; CC handling is done by the cout callback. */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1365
/* 64-bit logical (unsigned) add; the carry-out (0/1) lands in cc_src. */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1372
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* Convert the SUBU representation of borrow in cc_src into a
           0/1 carry value by adding 1. */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Fold any other CC state into a static CC value first. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1393
/* 32-bit add with carry (ALCR family): out = in1 + in2 + carry. */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}
1401
/* 64-bit add with carry; the new carry-out is accumulated in cc_src. */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    /* Two add2 steps: first fold in the incoming carry, then add in2,
       collecting the final carry-out in cc_src. */
    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
1413
/*
 * ADD IMMEDIATE to storage (ASI family).  With STFLE bit 45
 * (interlocked-access facility) the update is done atomically in
 * memory; otherwise as a separate load / add / store.
 */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1435
/*
 * ADD LOGICAL IMMEDIATE to storage (ALSI family).  Same structure as
 * op_asi, but the recomputation also produces the carry-out in cc_src
 * for the logical CC.
 */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1458
/* BFP add, short format (AEB): delegated to a softfloat helper. */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, long format (ADB). */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, extended (128-bit) format (AXB); the low half of the
   128-bit result comes back via return_low128. */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
1477
/* Bitwise AND. */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/*
 * AND immediate into one 16/32-bit field of a register (NIHH etc.).
 * insn->data encodes the field: low byte = bit position, high byte(s)
 * = field width in bits.
 */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Move the immediate to the field position and fill all other bit
       positions with ones, so bits outside the field pass through. */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

/* AND with complement: out = in1 & ~in2. */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* OR with complement: out = in1 | ~in2. */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NAND: out = ~(in1 & in2). */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOR: out = ~(in1 | in2). */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT XOR (equivalence): out = ~(in1 ^ in2). */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1530
/*
 * AND to storage (NI/NIY).  With the interlocked-access facility 2
 * the update is performed atomically in memory; otherwise as a
 * load / and / store sequence.
 */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1551
/* BRANCH AND SAVE: store the link information, then branch to the
   register target.  A zero R2 (in2 == NULL) means save-only, no branch. */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1563
/*
 * Build the BAL-style link information in o->out.  In 31/64-bit mode
 * this is the plain link address.  In 24-bit mode the low word is:
 * ILC in bits 30-31, CC in bits 28-29, program mask in bits 24-27,
 * and the 24-bit next-instruction address below that.
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    /* CC must be computed so it can be stored into the link word. */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Extract the program mask from the PSW. */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
1584
/* BRANCH AND LINK: like BAS, but with the legacy 24-bit-mode link
   format (see save_link_info). */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1596
/* Relative branch-and-save: save the link, then branch by i2 halfwords
   (BRAS/BRASL — presumably; verify against the decode table). */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
1602
1603 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1604 {
1605 int m1 = get_field(s, m1);
1606 bool is_imm = have_field(s, i2);
1607 int imm = is_imm ? get_field(s, i2) : 0;
1608 DisasCompare c;
1609
1610 /* BCR with R2 = 0 causes no branching */
1611 if (have_field(s, r2) && get_field(s, r2) == 0) {
1612 if (m1 == 14) {
1613 /* Perform serialization */
1614 /* FIXME: check for fast-BCR-serialization facility */
1615 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1616 }
1617 if (m1 == 15) {
1618 /* Perform serialization */
1619 /* FIXME: perform checkpoint-synchronisation */
1620 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1621 }
1622 return DISAS_NEXT;
1623 }
1624
1625 disas_jcc(s, &c, m1);
1626 return help_branch(s, &c, is_imm, imm, o->in2);
1627 }
1628
/* BRANCH ON COUNT, 32-bit (BCT/BCTR/BRCT): decrement the low word of
   r1 and branch if the result is non-zero. */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Decrement in 64 bits, then write back only the low word. */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1652
/* BRANCH ON COUNT HIGH (BRCTH): like op_bct32, but operating on the
   high word of r1; the target is always immediate. */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    /* Extract the high word, decrement, write it back. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1676
/* BRANCH ON COUNT, 64-bit (BCTG/BRCTG): decrement r1 in place and
   branch if non-zero.  g1 = true: compare reads the global register. */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1695
/*
 * BRANCH ON INDEX, 32-bit (BXH/BXLE and relative forms): add r3 to r1
 * and branch when the sum compares GT (high) or LE (low-or-equal,
 * selected by insn->data) against the odd register of the r3 pair.
 */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* r3 | 1: the comparand lives in the odd register of the pair. */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1721
/*
 * BRANCH ON INDEX, 64-bit (BXHG/BXLEG): as op_bx32 on full registers.
 * If r1 aliases the comparand register, its old value must be copied
 * before the addition overwrites it.
 */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* Comparand would be clobbered by the add below: snapshot it. */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1747
/*
 * COMPARE AND BRANCH family (CRJ/CGRJ/CLRJ/... and the CRB forms):
 * compare in1 with in2 using the relation encoded in m3 (unsigned if
 * insn->data is set), then branch to an immediate-relative target or
 * to a base+displacement address (b4/d4).
 */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        /* Non-relative form: target address from b4/d4. */
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1773
/* BFP compare, short format (CEB): helper sets the CC directly. */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, long format (CDB). */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, extended (128-bit) format (CXB). */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1794
/*
 * Extract and validate the m3 (rounding mode) and m4 fields of an FP
 * instruction, packed into a single i32 (m4 in bits 4-7) for the
 * helper.  Fields that only exist with the floating-point-extension
 * facility are forced to 0 when the facility is absent.  Returns NULL
 * (after raising a specification exception) on an invalid rounding
 * mode.
 */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
1819
/*
 * CONVERT TO FIXED / CONVERT TO LOGICAL (BFP source): round the BFP
 * value per the m3/m4 fields and convert to a signed (op_cf*/op_cg*)
 * or unsigned (op_clf*/op_clg*) integer.  Each helper sets the CC;
 * DISAS_NORETURN is returned if the m34 fields were invalid.
 */

/* float32 -> int32 (CFEB) */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float64 -> int32 (CFDB) */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float128 -> int32 (CFXB) */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float32 -> int64 (CGEB) */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float64 -> int64 (CGDB) */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float128 -> int64 (CGXB) */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float32 -> uint32 (CLFEB) */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float64 -> uint32 (CLFDB) */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float128 -> uint32 (CLFXB) */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float32 -> uint64 (CLGEB) */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float64 -> uint64 (CLGDB) */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* float128 -> uint64 (CLGXB) */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1975
/*
 * CONVERT FROM FIXED / CONVERT FROM LOGICAL (BFP target): convert a
 * signed (op_c*gb) or unsigned (op_c*lgb) 64-bit integer to BFP,
 * rounding per the m3/m4 fields.  These do not set the CC.
 */

/* int64 -> float32 (CEGB) */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* int64 -> float64 (CDGB) */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* int64 -> float128 (CXGB); low half returned via return_low128. */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* uint64 -> float32 (CELGB) */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* uint64 -> float64 (CDLGB) */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* uint64 -> float128 (CXLGB); low half returned via return_low128. */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2049
/*
 * CHECKSUM (CKSM): the helper computes the checksum and the number of
 * bytes processed; the r2/r2+1 address/length pair is advanced by that
 * amount.  Result and CC come back via return_low128/set_cc_static.
 */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the second-operand address and reduce its length. */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
2065
/*
 * COMPARE LOGICAL (CLC): memory-to-memory unsigned compare of l+1
 * bytes.  Power-of-two sizes up to 8 are inlined as two loads plus a
 * CC computation; everything else goes through the helper.
 */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: byte-wise compare in the helper. */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
2098
/* COMPARE LOGICAL LONG (CLCL): operates on two even/odd register
   pairs; odd r1/r2 raises a specification exception. */
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2119
/* COMPARE LOGICAL LONG EXTENDED (CLCLE): as CLCL, with the pad
   character supplied via in2. */
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2140
/* COMPARE LOGICAL LONG UNICODE (CLCLU): as CLCLE for two-byte
   (Unicode) operands. */
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2161
/* COMPARE LOGICAL CHARACTERS UNDER MASK (CLM): compare the bytes of
   r1 selected by the m3 mask against successive bytes at address in2. */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
2173
/* COMPARE LOGICAL STRING (CLST): regs[0] holds the terminator byte;
   updated operand addresses come back in in1 and (via low128) in2. */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
2181
2182 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2183 {
2184 TCGv_i64 t = tcg_temp_new_i64();
2185 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2186 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2187 tcg_gen_or_i64(o->out, o->out, t);
2188 tcg_temp_free_i64(t);
2189 return DISAS_NEXT;
2190 }
2191
/* COMPARE AND SWAP (CS/CSY/CSG): atomic cmpxchg at b2+d2; CC is 0 on
   match (swap done), 1 on mismatch. */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value). */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal? Note that this setcond
       produces the output CC value, thus the NE sense of the test. */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2216
/*
 * COMPARE DOUBLE AND SWAP (CDSG): 128-bit compare-and-swap on the
 * R1:R1+1 / R3:R3+1 register pairs.  A serial (non-parallel) TB uses
 * the plain helper; parallel execution needs host cmpxchg16, else we
 * punt to the exclusive-execution fallback.
 */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        /* No host 128-bit cmpxchg: restart in exclusive mode. */
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
2246
/* COMPARE AND SWAP AND STORE (CSST): fully delegated to a helper, with
   a distinct variant for parallel (multi-cpu) execution. */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
2262
2263 #ifndef CONFIG_USER_ONLY
/*
 * COMPARE AND SWAP AND PURGE (CSP/CSPG, privileged): cmpxchg at the
 * size-aligned address in2, and if the swap succeeded and bit 63 of
 * R2 is set, purge the TLB on all cpus.
 */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value). */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* The operand address is aligned down to the access size. */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal? */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps. */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus). */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2306 #endif
2307
2308 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2309 {
2310 TCGv_i64 t1 = tcg_temp_new_i64();
2311 TCGv_i32 t2 = tcg_temp_new_i32();
2312 tcg_gen_extrl_i64_i32(t2, o->in1);
2313 gen_helper_cvd(t1, t2);
2314 tcg_temp_free_i32(t2);
2315 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2316 tcg_temp_free_i64(t1);
2317 return DISAS_NEXT;
2318 }
2319
/*
 * COMPARE AND TRAP (CRT family): compare in1 with in2 using the m3
 * relation (unsigned if insn->data is set) and trap when it holds.
 * The branch tests the inverted condition so the fall-through path is
 * the no-trap case.
 */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2338
/*
 * CONVERT UTF (CU12/CU14/CU21/CU24/CU41/CU42): insn->data encodes the
 * source/destination encodings (1 = UTF-8, 2 = UTF-16, 4 = UTF-32) as
 * a two-digit decimal.  m3 (well-formedness check flag) is honored
 * only with the ETF3 enhancement facility.
 */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even. */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
2388
2389 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE: pass r1, r3 and the immediate function code to the helper. */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
2403 #endif
2404
/* 32-bit signed divide; the helper returns quotient and remainder as a
   128-bit pair (high half fetched via return_low128). */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2411
/* 32-bit unsigned divide; see op_divs32 for the result-pair convention. */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2418
/* 64-bit signed divide; quotient/remainder returned as a 128-bit pair. */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2425
/* 128/64-bit unsigned divide: the dividend is the (out, out2) pair. */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2432
/* BFP short (32-bit) divide via helper. */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2438
/* BFP long (64-bit) divide via helper. */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2444
/* BFP extended (128-bit) divide; operands and result are register pairs. */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2451
/* EXTRACT ACCESS: read access register r2 into the output. */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2458
/* EXTRACT CACHE ATTRIBUTE: we model no cache topology, so return -1. */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2465
/* EXTRACT FPC: read the floating-point control register. */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2471
/* EXTRACT PSW: store the high half of the PSW mask into r1, and (if r2
   is nonzero) the low half into r2. */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2. Thus we cannot defer these writes to an output hook. */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
2489
/* EXECUTE: run the instruction at the target address, modified by the
   low byte of r1.  The heavy lifting (fetch, modify, re-enter the
   translator) is done in the helper; here we only reject nesting and
   sync the PSW/CC state the helper needs. */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed. */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    /* The helper observes psw.addr and cc, so flush them first. */
    update_psw_addr(s);
    update_cc_op(s);

    /* r1 == 0 means "no modification"; pass a zero constant. */
    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
2521
/* LOAD FP INTEGER (short BFP): round to integer per the m34 modifier.
   fpinst_extract_m34 returns NULL (after raising the exception) when
   the modifier fields are invalid. */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2533
/* LOAD FP INTEGER (long BFP); see op_fieb for the m34 convention. */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2545
/* LOAD FP INTEGER (extended BFP); 128-bit operand passed/returned as a
   register pair. */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2558
/* FIND LEFTMOST ONE: R1 gets the count of leading zeros (64 if the
   input is zero), R1+1 gets the input with the found bit cleared. */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64. It also lets cc_dst be a convenient
       temporary during our computation. */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
       value by 64, which is undefined. But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing. */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2578
2579 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2580 {
2581 int m3 = get_field(s, m3);
2582 int pos, len, base = s->insn->data;
2583 TCGv_i64 tmp = tcg_temp_new_i64();
2584 uint64_t ccm;
2585
2586 switch (m3) {
2587 case 0xf:
2588 /* Effectively a 32-bit load. */
2589 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2590 len = 32;
2591 goto one_insert;
2592
2593 case 0xc:
2594 case 0x6:
2595 case 0x3:
2596 /* Effectively a 16-bit load. */
2597 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2598 len = 16;
2599 goto one_insert;
2600
2601 case 0x8:
2602 case 0x4:
2603 case 0x2:
2604 case 0x1:
2605 /* Effectively an 8-bit load. */
2606 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2607 len = 8;
2608 goto one_insert;
2609
2610 one_insert:
2611 pos = base + ctz32(m3) * 8;
2612 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2613 ccm = ((1ull << len) - 1) << pos;
2614 break;
2615
2616 default:
2617 /* This is going to be a sequence of loads and inserts. */
2618 pos = base + 32 - 8;
2619 ccm = 0;
2620 while (m3) {
2621 if (m3 & 0x8) {
2622 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2623 tcg_gen_addi_i64(o->in2, o->in2, 1);
2624 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2625 ccm |= 0xff << pos;
2626 }
2627 m3 = (m3 << 1) & 0xf;
2628 pos -= 8;
2629 }
2630 break;
2631 }
2632
2633 tcg_gen_movi_i64(tmp, ccm);
2634 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2635 tcg_temp_free_i64(tmp);
2636 return DISAS_NEXT;
2637 }
2638
/* Insert immediate: insn->data packs the deposit size (high byte) and
   bit offset (low byte) of in2 within in1. */
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
2646
/* INSERT PROGRAM MASK: build byte { program mask (PSW bits 40-43),
   current CC } and deposit it into bits 24-31 of the output register. */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* Materialize the CC value before reading it. */
    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
2662
2663 #ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY: m4 (local-clearing control) is honored
   only when the local-TLB-clearing facility is installed. */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}
2677
/* INVALIDATE PAGE TABLE ENTRY; same m4 gating as op_idte. */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}
2691
/* INSERT STORAGE KEY EXTENDED via helper. */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2697 #endif
2698
2699 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2700 {
2701 int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2702 int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2703 int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2704 TCGv_i32 t_r1, t_r2, t_r3, type;
2705
2706 switch (s->insn->data) {
2707 case S390_FEAT_TYPE_KMA:
2708 if (r3 == r1 || r3 == r2) {
2709 gen_program_exception(s, PGM_SPECIFICATION);
2710 return DISAS_NORETURN;
2711 }
2712 /* FALL THROUGH */
2713 case S390_FEAT_TYPE_KMCTR:
2714 if (r3 & 1 || !r3) {
2715 gen_program_exception(s, PGM_SPECIFICATION);
2716 return DISAS_NORETURN;
2717 }
2718 /* FALL THROUGH */
2719 case S390_FEAT_TYPE_PPNO:
2720 case S390_FEAT_TYPE_KMF:
2721 case S390_FEAT_TYPE_KMC:
2722 case S390_FEAT_TYPE_KMO:
2723 case S390_FEAT_TYPE_KM:
2724 if (r1 & 1 || !r1) {
2725 gen_program_exception(s, PGM_SPECIFICATION);
2726 return DISAS_NORETURN;
2727 }
2728 /* FALL THROUGH */
2729 case S390_FEAT_TYPE_KMAC:
2730 case S390_FEAT_TYPE_KIMD:
2731 case S390_FEAT_TYPE_KLMD:
2732 if (r2 & 1 || !r2) {
2733 gen_program_exception(s, PGM_SPECIFICATION);
2734 return DISAS_NORETURN;
2735 }
2736 /* FALL THROUGH */
2737 case S390_FEAT_TYPE_PCKMO:
2738 case S390_FEAT_TYPE_PCC:
2739 break;
2740 default:
2741 g_assert_not_reached();
2742 };
2743
2744 t_r1 = tcg_const_i32(r1);
2745 t_r2 = tcg_const_i32(r2);
2746 t_r3 = tcg_const_i32(r3);
2747 type = tcg_const_i32(s->insn->data);
2748 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2749 set_cc_static(s);
2750 tcg_temp_free_i32(t_r1);
2751 tcg_temp_free_i32(t_r2);
2752 tcg_temp_free_i32(t_r3);
2753 tcg_temp_free_i32(type);
2754 return DISAS_NEXT;
2755 }
2756
/* BFP short compare-and-signal; helper sets the CC. */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2763
/* BFP long compare-and-signal; helper sets the CC. */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2770
/* BFP extended compare-and-signal; 128-bit operands as register pairs. */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2777
/* LOAD AND ADD: atomically add in1 to memory; the register result is
   the original memory value. */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The atomic op leaves the old memory value in in2. */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2788
/* LOAD AND AND: atomically AND in1 into memory; register result is the
   original memory value. */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The atomic op leaves the old memory value in in2. */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2799
/* LOAD AND OR: atomically OR in1 into memory; register result is the
   original memory value. */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The atomic op leaves the old memory value in in2. */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2810
/* LOAD AND EXCLUSIVE OR: atomically XOR in1 into memory; register
   result is the original memory value. */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The atomic op leaves the old memory value in in2. */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2821
/* Lengthen short BFP to long via helper. */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2827
/* Round long BFP to short; m34 carries the rounding-mode modifier
   (NULL on invalid modifier, exception already raised). */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2839
/* Round extended BFP to long; see op_ledb for the m34 convention. */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2851
/* Round extended BFP to short; see op_ledb for the m34 convention. */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2863
/* Lengthen long BFP to extended; 128-bit result as a register pair. */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2870
/* Lengthen short BFP to extended; 128-bit result as a register pair. */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2877
/* Load a short FP value into the high half of the register. */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2883
/* LOAD 31-BIT: keep only the low 31 bits of the source. */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2889
/* Sign-extending 8-bit memory load. */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2895
/* Zero-extending 8-bit memory load. */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2901
/* Sign-extending 16-bit memory load. */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2907
/* Zero-extending 16-bit memory load. */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2913
/* Sign-extending 32-bit memory load. */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2919
/* Zero-extending 32-bit memory load. */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2925
/* 64-bit memory load. */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2931
/* LOAD AND TRAP (32-bit): store the value, then trap if it is zero. */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2942
/* LOAD AND TRAP (64-bit): load from memory, then trap if zero. */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2953
/* LOAD HIGH AND TRAP: store into the register high word, trap if zero. */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2964
/* LOAD LOGICAL AND TRAP: 32-bit zero-extending load, trap if zero. */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2975
/* LOAD LOGICAL 31-BIT AND TRAP: mask to 31 bits, trap if zero. */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2986
/* LOAD/STORE ON CONDITION and SELECT: out = condition ? in2 : in1.
   The condition comes from m3 (LOC*) or m4 (SELECT). */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit compare: materialize the condition as 0/1, widen it,
           and select on "!= 0". */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
3022
3023 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 from memory. */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return DISAS_PC_STALE_NOCHAIN;
}
3034
/* LOAD CONTROL (64-bit): load control registers r1..r3 from memory. */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return DISAS_PC_STALE_NOCHAIN;
}
3045
/* LOAD REAL ADDRESS via helper; helper also sets the CC. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3052
/* LOAD PROGRAM PARAMETER: store in2 into env->pp. */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
3058
/* LOAD PSW (short format): read a 32-bit mask and 32-bit address from
   a doubleword-aligned operand and install the new PSW. */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The first word is loaded with 8-byte alignment enforced. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3078
/* LOAD PSW EXTENDED: read a full 64-bit mask and 64-bit address and
   install the new PSW. */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The first doubleword is loaded with 8-byte alignment enforced. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3096 #endif
3097
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory. */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
3107
/* LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping mod 16) from
   consecutive words.  The first and last words are loaded first so any
   access exception is raised before registers are modified. */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3153
/* LOAD MULTIPLE HIGH: like op_lm32 but targets the register high words. */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3199
/* LOAD MULTIPLE (64-bit): load registers r1..r3 (wrapping mod 16) from
   consecutive doublewords; first/last loaded first for fault ordering. */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3240
/* LOAD PAIR DISJOINT: load two separately-addressed operands as if
   interlocked.  Under CF_PARALLEL we punt to the exclusive-execution
   path; serially we just do both loads and report CC 0. */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3266
/* LOAD PAIR FROM QUADWORD: 128-bit atomic load; uses the parallel
   helper when 128-bit atomics are available, otherwise falls back to
   exclusive execution. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}
3280
3281 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS: load through the real-address MMU index;
   insn->data carries the MemOp size. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3287 #endif
3288
/* LOAD AND ZERO RIGHTMOST BYTE: clear the low 8 bits of the source. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3294
/* LOAD COUNT TO BLOCK BOUNDARY: out = min(16, bytes remaining until
   the next 2^(m3+6)-byte boundary from addr1); m3 > 6 is invalid. */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -(addr | -block_size) == block_size - (addr % block_size). */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3311
/* MONITOR CALL: validate the monitor class; in system mode hand it to
   the helper, in user mode it is a NOP. */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
#if !defined(CONFIG_USER_ONLY)
    TCGv_i32 i2;
#endif
    const uint16_t monitor_class = get_field(s, i2);

    /* Only the low 8 bits of the class field are valid. */
    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    i2 = tcg_const_i32(monitor_class);
    gen_helper_monitor_call(cpu_env, o->addr1, i2);
    tcg_temp_free_i32(i2);
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3332
/* Generic move: hand ownership of in2 to out (no copy emitted; the
   output writeback machinery stores it). */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}
3341
/* Move with access-register-1 update (MVCDK/MVCSK style operands):
   besides handing in2 to out, set AR1 according to the current
   address-space control. */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In AR mode with a base register, propagate that register's
           access register; otherwise fall back to 0. */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}
3376
/* Pair move: hand ownership of (in1, in2) to (out, out2). */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
3388
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via helper. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3396
/* MOVE RIGHT TO LEFT: length comes from register 0. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}
3402
/* MOVE INVERSE: copy l1+1 bytes with reversed byte order via helper. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3410
/* MOVE LONG: register-pair based move; r1 and r2 must be even pairs.
   Helper performs the move and sets the CC. */
static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3431
/* MOVE LONG EXTENDED: like MVCL but with a padding operand in in2;
   r1 and r3 must be even pairs. */
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3452
/* MOVE LONG UNICODE: double-byte variant of MVCLE; r1 and r3 must be
   even pairs. */
static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3473
/* MOVE WITH OPTIONAL SPECIFICATIONS: key/space controls are in r3. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3481
3482 #ifndef CONFIG_USER_ONLY
/* MVCP: move to primary.  Note the length register comes from the L1 field. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3490
/* MVCS: move to secondary.  As for MVCP, the L1 field names the length reg. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3498 #endif
3499
/* MVN: move numerics (low nibbles) via helper; L1 field gives the length. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3507
/* MVO: move with offset via helper; L1 field gives the length. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3515
/* MVPG: move page.  regs[0] carries the flags operand; CC from helper. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3527
/* MVST: move string; helper reads/updates R1/R2 itself and sets the CC. */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3539
/* MVZ: move zones (high nibbles) via helper; L1 field gives the length. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3547
/* Generic 64-bit multiply, low half of the product only. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3553
/* Unsigned 64x64->128 multiply; high half to out, low half to out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3559
/* Signed 64x64->128 multiply; high half to out, low half to out2. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3565
/* MEEB: BFP multiply, short (32-bit) operands. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3571
/* MDEB: BFP multiply, short operands widening to a long result. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3577
/* MDB: BFP multiply, long (64-bit) operands. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3583
/* MXB: 128-bit BFP multiply; the 128-bit result comes back split in two. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3590
/* MXDB: BFP multiply, long operands widening to a 128-bit result. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3597
/* MAEB: short BFP multiply-and-add; third operand is FP register R3. */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3605
/* MADB: long BFP multiply-and-add; third operand is FP register R3. */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3613
/* MSEB: short BFP multiply-and-subtract; third operand is FP register R3. */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3621
/* MSDB: long BFP multiply-and-subtract; third operand is FP register R3. */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3629
/*
 * Load negative: out = -|in2|.  Select the negated value when in2 >= 0,
 * otherwise keep in2 (already negative).
 */
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}
3641
/* Negative-absolute for 32-bit float: force the sign bit on (bit 31). */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3647
/* Negative-absolute for 64-bit float: force the sign bit on (bit 63). */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3653
/* Negative-absolute for 128-bit float: sign lives in the high doubleword. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3660
/* NC: AND storage-to-storage via helper; L1 gives length, CC from helper. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3669
/* Two's-complement negate. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3675
/* Negate 32-bit float: flip the sign bit (bit 31). */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3681
/* Negate 64-bit float: flip the sign bit (bit 63). */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3687
/* Negate 128-bit float: flip the sign in the high doubleword, copy the low. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3694
/* OC: OR storage-to-storage via helper; L1 gives length, CC from helper. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3703
/* Generic 64-bit bitwise OR. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3709
/*
 * OR immediate into a sub-field of the register.  insn->data packs the
 * field's bit position (low byte) and width (next byte); the immediate in
 * in2 is shifted into place before the OR.
 */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3725
/*
 * OR to memory.  Without interlocked-access-2, do a plain load/or/store;
 * with it, perform the OR atomically in memory.  Either way the OR is
 * (re)computed in out so the caller's CC logic can use it.
 */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3746
/* PACK: pack zoned decimal via helper; L1 field gives the length. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3754
/* PKA: pack ASCII; second-operand length (L2 + 1) limited to 32 bytes. */
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3770
/* PKU: pack unicode; length (L2 + 1) must be even and at most 64 bytes. */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3786
/*
 * POPCNT.  With M3 bit 8 and misc-instruction-extensions-3, count over the
 * whole 64-bit register; otherwise the helper counts per byte.
 */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}
3798
3799 #ifndef CONFIG_USER_ONLY
/* PTLB: purge TLB (privileged; compiled out for user-only). */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
3805 #endif
3806
/*
 * RISBG/RISBGN/RISBHG/RISBLG: rotate then insert selected bits.
 * Rotate in2 left by I5, insert bit range [I3..I4] into the output,
 * keeping (or zeroing, if I4 bit 0x80 is set) the remaining bits of R1.
 * Fast paths use a single extract or deposit when the masks allow it.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3894
3895 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3896 {
3897 int i3 = get_field(s, i3);
3898 int i4 = get_field(s, i4);
3899 int i5 = get_field(s, i5);
3900 uint64_t mask;
3901
3902 /* If this is a test-only form, arrange to discard the result. */
3903 if (i3 & 0x80) {
3904 o->out = tcg_temp_new_i64();
3905 o->g_out = false;
3906 }
3907
3908 i3 &= 63;
3909 i4 &= 63;
3910 i5 &= 63;
3911
3912 /* MASK is the set of bits to be operated on from R2.
3913 Take care for I3/I4 wraparound. */
3914 mask = ~0ull >> i3;
3915 if (i3 <= i4) {
3916 mask ^= ~0ull >> i4 >> 1;
3917 } else {
3918 mask |= ~(~0ull >> i4 >> 1);
3919 }
3920
3921 /* Rotate the input as necessary. */
3922 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3923
3924 /* Operate. */
3925 switch (s->fields.op2) {
3926 case 0x54: /* AND */
3927 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3928 tcg_gen_and_i64(o->out, o->out, o->in2);
3929 break;
3930 case 0x56: /* OR */
3931 tcg_gen_andi_i64(o->in2, o->in2, mask);
3932 tcg_gen_or_i64(o->out, o->out, o->in2);
3933 break;
3934 case 0x57: /* XOR */
3935 tcg_gen_andi_i64(o->in2, o->in2, mask);
3936 tcg_gen_xor_i64(o->out, o->out, o->in2);
3937 break;
3938 default:
3939 abort();
3940 }
3941
3942 /* Set the CC. */
3943 tcg_gen_andi_i64(cc_dst, o->out, mask);
3944 set_cc_nz_u64(s, cc_dst);
3945 return DISAS_NEXT;
3946 }
3947
/* Byte-swap the low 16 bits; input and output zero-extended. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3953
/* Byte-swap the low 32 bits; input and output zero-extended. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3959
/* Byte-swap all 64 bits. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3965
/* RLL: 32-bit rotate left; done in i32 temps, result zero-extended. */
static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return DISAS_NEXT;
}
3980
/* RLLG: 64-bit rotate left. */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3986
3987 #ifndef CONFIG_USER_ONLY
/* RRBE: reset reference bit extended; CC comes back from the helper. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3994
/* SACF: set address-space control fast. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return DISAS_PC_STALE;
}
4001 #endif
4002
/*
 * SAM24/SAM31/SAM64: set addressing mode.  insn->data encodes which
 * (0/1/other); the mode bits are deposited into PSW mask bits 31-32 and
 * the current/next PC is checked against the new address-width mask.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
4037
/* SAR: set access register R1 from the low 32 bits of in2. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
4044
/* SEB: BFP subtract, short operands. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4050
/* SDB: BFP subtract, long operands. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4056
/* SXB: 128-bit BFP subtract; the 128-bit result comes back split in two. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
4063
/* SQEB: BFP square root, short operand. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
4069
/* SQDB: BFP square root, long operand. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
4075
/* SQXB: 128-bit BFP square root; result comes back split in two. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
4082
4083 #ifndef CONFIG_USER_ONLY
/* SERVC: service call (SCLP); CC comes back from the helper. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4090
/* SIGP: signal processor; order/address in in2, R1/R3 passed to helper. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4101 #endif
4102
/*
 * STOC/STOCG/STOCFH: store on condition.  Branch around the store when
 * the (inverted) condition from M3 does not hold; insn->data selects the
 * store width/half (0=32-bit, 1=64-bit, 2=high 32 bits).
 */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
4147
/*
 * SLA/SLAG: shift left arithmetic.  insn->data is the sign-bit position
 * (31 for the 32-bit form, 63 for 64-bit).  The CC is computed from the
 * operand widened to bit 63 for the 32-bit form; the result keeps the
 * original sign bit unchanged per the architecture.
 */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    if (s->insn->data == 31) {
        tcg_temp_free_i64(t);
    }
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
4170
/* Logical shift left. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4176
/* Arithmetic shift right. */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4182
/* Logical shift right. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4188
/* SFPC: set floating-point control register. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}
4194
/* SFASR: set floating-point control and signal (may raise simulated IEEE). */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
4200
/* SRNM: set BFP rounding mode from the low 2 bits of the address. */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4208
/* SRNMB: set BFP rounding mode from the low byte of the address. */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4216
/*
 * SRNMT: set DFP rounding mode.  DFP isn't implemented, so just deposit
 * the 3 mode bits directly into the FPC field inline.
 */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4232
/*
 * SPM: set program mask.  Bits 28-29 of in1 become the CC; bits 24-27
 * become the PSW program mask.
 */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4243
/*
 * ECTG: extract CPU time.  Computes all effective addresses up front,
 * loads the third operand into R3, stores (first operand - CPU timer)
 * into GR0 and the second operand address into GR1.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4274
4275 #ifndef CONFIG_USER_ONLY
/* SPKA: set PSW key from bits 56-59 of the second-operand address. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}
4282
/* SSKE: set storage key extended. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4288
/* SSM: set system mask (PSW bits 0-7). */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
4295
/* STAP: store CPU address (core id). */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4301 #endif
4302
/* STCK: store clock; CC forced to 0 (clock always in set state here). */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4310
/*
 * STCKE: store clock extended.  Builds the 16-byte extended TOD value
 * from the 64-bit clock plus the TOD programmable register and stores
 * it as two 8-byte pieces.
 */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4337
4338 #ifndef CONFIG_USER_ONLY
/* SCK: set clock; CC comes back from the helper. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4345
/* SCKC: set clock comparator. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}
4351
/* SCKPF: set clock programmable field from GR0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}
4357
/* STCKC: store clock comparator. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}
4363
/* STCTG: store control registers R1..R3, 64-bit. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4373
/* STCTL: store control registers R1..R3, 32-bit. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4383
/* STIDP: store CPU id. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}
4389
/* SPT: set CPU timer. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}
4395
/* STFL: store facility list (to low-core). */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}
4401
/* STPT: store CPU timer. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}
4407
/* STSI: store system information; GR0/GR1 select the block; CC via helper. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4414
/* SPX: set prefix register. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
4420
/* XSCH: cancel subchannel; subchannel id implicitly in GR1. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4427
/* CSCH: clear subchannel; subchannel id implicitly in GR1. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4434
/* HSCH: halt subchannel; subchannel id implicitly in GR1. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4441
/* MSCH: modify subchannel; GR1 = subchannel id, in2 = SCHIB address. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4448
/* RCHP: reset channel path; path id implicitly in GR1. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4455
/* RSCH: resume subchannel; subchannel id implicitly in GR1. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4462
/* SAL: set address limit; value implicitly in GR1. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}
4468
/* SCHM: set channel monitor; operands implicitly in GR1/GR2. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4474
/* SIGA: signal adapter — not provided; report subchannel not operational. */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}
4481
/* STCPS: store channel path status — suppressed (no-op) when not provided. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}
4487
/* SSCH: start subchannel; GR1 = subchannel id, in2 = ORB address. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4494
/* STSCH: store subchannel; GR1 = subchannel id, in2 = SCHIB address. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4501
/* STCRW: store channel report word at the in2 address. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4508
/* TPI: test pending interruption; CC comes back from the helper. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4515
/* TSCH: test subchannel; GR1 = subchannel id, in2 = IRB address. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4522
/* CHSC: channel subsystem call; in2 addresses the command block. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4529
/* STPX: store prefix; masked to the architected prefix bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4536
/*
 * STNSM/STOSM: store then AND/OR system mask.  The old mask byte is
 * stored first so a fault can restart cleanly; the file's op field
 * (0xac = STNSM) selects AND vs OR of the immediate into PSW bits 0-7.
 */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
4560
/* STURA/STURG: store using real address; also fires PER store tracing. */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
4571 #endif
4572
/* STFLE: store facility list extended; CC comes back from the helper. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4579
/* Store the low byte of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4585
/* Store the low halfword of in1 at address in2. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4591
/* Store the low word of in1 at address in2. */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4597
/* Store the full doubleword in1 at address in2. */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4603
/* STAM: store access registers R1..R3 at address in2. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4613
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by the
   m3 mask to consecutive bytes at in2.  base (s->insn->data) is the bit
   offset of the 32-bit field of the register the mask applies to.  */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* For a contiguous mask, ctz32 locates the rightmost selected byte;
       shifting by pos brings the whole selected field to the low bits.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  Walk the
           mask bits left to right, advancing o->in2 past each stored byte.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4662
/* STORE MULTIPLE: store registers r1..r3 (register numbers wrap mod 16)
   at in2, each s->insn->data (4 or 8) bytes wide; in2 is advanced as a
   running address.  */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}
4686
4687 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4688 {
4689 int r1 = get_field(s, r1);
4690 int r3 = get_field(s, r3);
4691 TCGv_i64 t = tcg_temp_new_i64();
4692 TCGv_i64 t4 = tcg_const_i64(4);
4693 TCGv_i64 t32 = tcg_const_i64(32);
4694
4695 while (1) {
4696 tcg_gen_shl_i64(t, regs[r1], t32);
4697 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4698 if (r1 == r3) {
4699 break;
4700 }
4701 tcg_gen_add_i64(o->in2, o->in2, t4);
4702 r1 = (r1 + 1) & 15;
4703 }
4704
4705 tcg_temp_free_i64(t);
4706 tcg_temp_free_i64(t4);
4707 tcg_temp_free_i64(t32);
4708 return DISAS_NEXT;
4709 }
4710
/* STORE PAIR TO QUADWORD: 16-byte store of out:out2 at in2.  Serial
   context uses the plain helper; parallel context needs a 128-bit
   atomic store, else we bail out via exit_atomic.  */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4723
/* SEARCH STRING (SRST) / SEARCH STRING UNICODE (SRSTU): performed in
   helpers; the condition code comes back in cc_op.  */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4749
/* Two's-complement subtract: out = in1 - in2.  */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4755
/* Unsigned 64-bit subtract with borrow-out: the double-word subtract
   (with zeroed high parts) leaves 0 in cc_src on no borrow, -1 on
   borrow — the (0,-1) convention CC_OP_SUBU expects.  */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4762
/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        /* Materialize the cc into cc_op, then fall into the STATIC case.  */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4784
/* SUBTRACT WITH BORROW (32-bit): out = in1 - in2 + borrow(0,-1).  */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}
4794
/* SUBTRACT WITH BORROW (64-bit): like op_subb32 but the borrow must be
   tracked across the full width, so use 128-bit add/sub pairs and leave
   the new borrow (0,-1) in cc_src.  */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
4810
/* SUPERVISOR CALL: record the SVC interruption code and instruction
   length in the CPU state, then raise the SVC exception.  */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    /* Low byte of the i1 field is the SVC number.  */
    t = tcg_const_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4829
4830 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4831 {
4832 int cc = 0;
4833
4834 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4835 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4836 gen_op_movi_cc(s, cc);
4837 return DISAS_NEXT;
4838 }
4839
/* TEST DATA CLASS for short/long/extended FP operands: the helpers
   return the condition code in cc_op.  */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    /* The 128-bit operand arrives as the out/out2 pair (see prep_x1).  */
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4860
4861 #ifndef CONFIG_USER_ONLY
4862
/* TEST BLOCK and TEST PROTECTION: privileged, helper-implemented;
   condition code comes back in cc_op.  */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4876
4877 #endif
4878
/* TEST DECIMAL: helper validates the packed field of length l1+1 bytes
   at addr1; cc in cc_op.  */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4887
/* TRANSLATE: helper rewrites the field at addr1 through the table at
   in2; the raw L field is passed, the helper applies the +1.  */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4896
/* TRANSLATE EXTENDED: helper returns a 128-bit result — first half in
   out, second half retrieved via return_low128 into out2; cc in cc_op.  */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4904
/* TRANSLATE AND TEST, forward and reverse variants: helper-implemented;
   cc comes back in cc_op.  */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4922
/* Common code for TRTT/TRTO/TROT/TROO: the low two opcode bits select
   the source/destination character sizes and are forwarded to the
   helper.  Without the ETF2-enhancement facility the m3 field is
   treated as zero.  m3 bit 0 selects tst = -1; otherwise the test
   character is taken from r0, truncated to 8 or 16 bits depending on
   the opcode.  */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}
4953
/* TEST AND SET: atomically exchange the byte at in2 with 0xff and set
   the cc from the byte's former leftmost bit (bit 7 of the byte).  */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4963
/* UNPACK: helper-implemented; no cc change.  */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
4971
/* UNPACK ASCII: specification exception on over-long operands, then
   defer to the helper; cc in cc_op.  */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4988
/* UNPACK UNICODE: like op_unpka but the length must additionally be
   even (two-byte characters).  */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
5005
5006
/* EXCLUSIVE OR (character): XOR two storage fields of l1+1 bytes.  */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* Emit inline zero stores in descending chunk sizes, advancing
           addr1 as we go; cc is always 0 for x XOR x.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
5059
/* Bitwise XOR: out = in1 ^ in2.  */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
5065
/* XOR IMMEDIATE: s->insn->data packs the immediate's bit offset in the
   low byte and its width in the high bits; the (pre-positioned) in2 is
   shifted into place before the XOR.  */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
5081
/* XOR against a memory operand.  With the interlocked-access facility
   the XOR is done atomically in memory; otherwise it is a plain
   load / xor / store sequence.  */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
5102
/* Produce a constant-zero output (and, for op_zero2, the same zero in
   out2 as well).  */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    /* out2 aliases out; g_out2 flags it so it is not treated as a
       separately owned temp (NOTE(review): inferred from the g_*
       convention — confirm against the common free path).  */
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
5116
5117 #ifndef CONFIG_USER_ONLY
/* z/PCI instructions: all implemented in helpers; except for SIC they
   return the condition code in cc_op.  */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET INTERRUPTION CONTROLS: no cc.  */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
5207 #endif
5208
5209 #include "translate_vx.c.inc"
5210
5211 /* ====================================================================== */
5212 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5213 the original inputs), update the various cc data structures in order to
5214 be able to compute the new condition code. */
5215
/* Each cout helper records the relevant inputs/result together with a
   CC_OP tag, so the condition code can be computed lazily on demand.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    /* Split the 64-bit sum of 32-bit operands: carry-out bits in
       cc_src, the 32-bit result in cc_dst.  */
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Test only the low 32 bits against zero.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    /* Arithmetic shift replicates the borrow as 0/-1 into cc_src;
       the 32-bit result is zero-extended into cc_dst.  */
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
5366
5367 /* ====================================================================== */
5368 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5369 with the TCG register to which we will write. Used in combination with
5370 the "wout" generators, in some cases we need a new temporary, and in
5371 some cases we can write to a TCG global. */
5372
/* Allocate a fresh temp for the single output.  */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate fresh temps for an output pair.  */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into the r1 global; g_out prevents it being freed.  */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Even/odd register pair r1:r1+1 as the output; SPEC enforces r1 even.  */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out = load_freg(get_field(s, r1));
    o->out2 = load_freg(get_field(s, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
5409
5410 /* ====================================================================== */
5411 /* The "Write OUTput" generators. These generally perform some non-trivial
5412 copy of data to TCG globals, or to main memory. The trivial cases are
5413 generally handled by having a "prep" generator install the TCG global
5414 as the destination of the operation. */
5415
/* Write the full 64-bit result to r1.  */
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Insert only the low 8 bits of the result into r1, leaving the rest.  */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* As above, for the low 16 bits.  */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* 32-bit result pair into the even/odd registers r1 and r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd pair: high word to r1,
   low word to r1+1.  */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Short FP result to f1.  */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Extended (128-bit) FP result to the register pair f1/f1+2.  */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Write back only when the two register operands are distinct.  */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Memory destinations: store the result at addr1 (or in2), in the
   indicated width; the "a" variants additionally require alignment.  */
static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Write the (possibly updated) in2 operand back to r1.  */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5590
5591 /* ====================================================================== */
5592 /* The "INput 1" generators. These load the first operand to an insn. */
5593
/* Load r1 into a fresh temp.  */
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* Use the r1 global directly; g_in1 prevents it being freed.  */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Low 32 bits of r1, sign-extended.  */
static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Low 32 bits of r1, zero-extended.  */
static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd register of the even/odd pair r1:r1+1.  */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
    o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Form a 64-bit value from the even/odd pair: r1 supplies the high
   word, r1+1 the low word.  */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

/* Short FP operand from f1.  */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Compute the first-operand effective address only (no load).  */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* As above, from the second-operand fields, with optional index x2.  */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* Memory operands: compute addr1, then load in the indicated width
   and signedness.  */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5810
5811 /* ====================================================================== */
5812 /* The "INput 2" generators. These load the second operand to an insn. */
5813
5814 static void in2_r1_o(DisasContext *s, DisasOps *o)
5815 {
5816 o->in2 = regs[get_field(s, r1)];
5817 o->g_in2 = true;
5818 }
5819 #define SPEC_in2_r1_o 0
5820
5821 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5822 {
5823 o->in2 = tcg_temp_new_i64();
5824 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5825 }
5826 #define SPEC_in2_r1_16u 0
5827
5828 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5829 {
5830 o->in2 = tcg_temp_new_i64();
5831 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5832 }
5833 #define SPEC_in2_r1_32u 0
5834
5835 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5836 {
5837 int r1 = get_field(s, r1);
5838 o->in2 = tcg_temp_new_i64();
5839 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5840 }
5841 #define SPEC_in2_r1_D32 SPEC_r1_even
5842
5843 static void in2_r2(DisasContext *s, DisasOps *o)
5844 {
5845 o->in2 = load_reg(get_field(s, r2));
5846 }
5847 #define SPEC_in2_r2 0
5848
5849 static void in2_r2_o(DisasContext *s, DisasOps *o)
5850 {
5851 o->in2 = regs[get_field(s, r2)];
5852 o->g_in2 = true;
5853 }
5854 #define SPEC_in2_r2_o 0
5855
5856 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5857 {
5858 int r2 = get_field(s, r2);
5859 if (r2 != 0) {
5860 o->in2 = load_reg(r2);
5861 }
5862 }
5863 #define SPEC_in2_r2_nz 0
5864
5865 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5866 {
5867 o->in2 = tcg_temp_new_i64();
5868 tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5869 }
5870 #define SPEC_in2_r2_8s 0
5871
5872 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5873 {
5874 o->in2 = tcg_temp_new_i64();
5875 tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5876 }
5877 #define SPEC_in2_r2_8u 0
5878
5879 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5880 {
5881 o->in2 = tcg_temp_new_i64();
5882 tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5883 }
5884 #define SPEC_in2_r2_16s 0
5885
5886 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5887 {
5888 o->in2 = tcg_temp_new_i64();
5889 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5890 }
5891 #define SPEC_in2_r2_16u 0
5892
5893 static void in2_r3(DisasContext *s, DisasOps *o)
5894 {
5895 o->in2 = load_reg(get_field(s, r3));
5896 }
5897 #define SPEC_in2_r3 0
5898
5899 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5900 {
5901 o->in2 = tcg_temp_new_i64();
5902 tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5903 }
5904 #define SPEC_in2_r3_sr32 0
5905
5906 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5907 {
5908 o->in2 = tcg_temp_new_i64();
5909 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5910 }
5911 #define SPEC_in2_r3_32u 0
5912
5913 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5914 {
5915 o->in2 = tcg_temp_new_i64();
5916 tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5917 }
5918 #define SPEC_in2_r2_32s 0
5919
5920 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5921 {
5922 o->in2 = tcg_temp_new_i64();
5923 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5924 }
5925 #define SPEC_in2_r2_32u 0
5926
5927 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5928 {
5929 o->in2 = tcg_temp_new_i64();
5930 tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5931 }
5932 #define SPEC_in2_r2_sr32 0
5933
5934 static void in2_e2(DisasContext *s, DisasOps *o)
5935 {
5936 o->in2 = load_freg32_i64(get_field(s, r2));
5937 }
5938 #define SPEC_in2_e2 0
5939
5940 static void in2_f2(DisasContext *s, DisasOps *o)
5941 {
5942 o->in2 = load_freg(get_field(s, r2));
5943 }
5944 #define SPEC_in2_f2 0
5945
5946 /* Load the low double word of an extended (128-bit) format FP number */
5947 static void in2_x2l(DisasContext *s, DisasOps *o)
5948 {
5949 o->in2 = load_freg(get_field(s, r2) + 2);
5950 }
5951 #define SPEC_in2_x2l SPEC_r2_f128
5952
5953 static void in2_ra2(DisasContext *s, DisasOps *o)
5954 {
5955 int r2 = get_field(s, r2);
5956
5957 /* Note: *don't* treat !r2 as 0, use the reg value. */
5958 o->in2 = tcg_temp_new_i64();
5959 gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5960 }
5961 #define SPEC_in2_ra2 0
5962
5963 static void in2_a2(DisasContext *s, DisasOps *o)
5964 {
5965 int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5966 o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5967 }
5968 #define SPEC_in2_a2 0
5969
5970 static void in2_ri2(DisasContext *s, DisasOps *o)
5971 {
5972 o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
5973 }
5974 #define SPEC_in2_ri2 0
5975
5976 static void in2_sh(DisasContext *s, DisasOps *o)
5977 {
5978 int b2 = get_field(s, b2);
5979 int d2 = get_field(s, d2);
5980
5981 if (b2 == 0) {
5982 o->in2 = tcg_const_i64(d2 & 0x3f);
5983 } else {
5984 o->in2 = get_address(s, 0, b2, d2);
5985 tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5986 }
5987 }
5988 #define SPEC_in2_sh 0
5989
5990 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5991 {
5992 in2_a2(s, o);
5993 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5994 }
5995 #define SPEC_in2_m2_8u 0
5996
5997 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5998 {
5999 in2_a2(s, o);
6000 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
6001 }
6002 #define SPEC_in2_m2_16s 0
6003
6004 static void in2_m2_16u(DisasContext *s, DisasOps *o)
6005 {
6006 in2_a2(s, o);
6007 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
6008 }
6009 #define SPEC_in2_m2_16u 0
6010
6011 static void in2_m2_32s(DisasContext *s, DisasOps *o)
6012 {
6013 in2_a2(s, o);
6014 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
6015 }
6016 #define SPEC_in2_m2_32s 0
6017
6018 static void in2_m2_32u(DisasContext *s, DisasOps *o)
6019 {
6020 in2_a2(s, o);
6021 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
6022 }
6023 #define SPEC_in2_m2_32u 0
6024
6025 #ifndef CONFIG_USER_ONLY
6026 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
6027 {
6028 in2_a2(s, o);
6029 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
6030 }
6031 #define SPEC_in2_m2_32ua 0
6032 #endif
6033
6034 static void in2_m2_64(DisasContext *s, DisasOps *o)
6035 {
6036 in2_a2(s, o);
6037 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6038 }
6039 #define SPEC_in2_m2_64 0
6040
6041 static void in2_m2_64w(DisasContext *s, DisasOps *o)
6042 {
6043 in2_a2(s, o);
6044 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6045 gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
6046 }
6047 #define SPEC_in2_m2_64w 0
6048
6049 #ifndef CONFIG_USER_ONLY
6050 static void in2_m2_64a(DisasContext *s, DisasOps *o)
6051 {
6052 in2_a2(s, o);
6053 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
6054 }
6055 #define SPEC_in2_m2_64a 0
6056 #endif
6057
6058 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
6059 {
6060 in2_ri2(s, o);
6061 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
6062 }
6063 #define SPEC_in2_mri2_16u 0
6064
6065 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
6066 {
6067 in2_ri2(s, o);
6068 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
6069 }
6070 #define SPEC_in2_mri2_32s 0
6071
6072 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
6073 {
6074 in2_ri2(s, o);
6075 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
6076 }
6077 #define SPEC_in2_mri2_32u 0
6078
6079 static void in2_mri2_64(DisasContext *s, DisasOps *o)
6080 {
6081 in2_ri2(s, o);
6082 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6083 }
6084 #define SPEC_in2_mri2_64 0
6085
6086 static void in2_i2(DisasContext *s, DisasOps *o)
6087 {
6088 o->in2 = tcg_const_i64(get_field(s, i2));
6089 }
6090 #define SPEC_in2_i2 0
6091
6092 static void in2_i2_8u(DisasContext *s, DisasOps *o)
6093 {
6094 o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
6095 }
6096 #define SPEC_in2_i2_8u 0
6097
6098 static void in2_i2_16u(DisasContext *s, DisasOps *o)
6099 {
6100 o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
6101 }
6102 #define SPEC_in2_i2_16u 0
6103
6104 static void in2_i2_32u(DisasContext *s, DisasOps *o)
6105 {
6106 o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
6107 }
6108 #define SPEC_in2_i2_32u 0
6109
6110 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
6111 {
6112 uint64_t i2 = (uint16_t)get_field(s, i2);
6113 o->in2 = tcg_const_i64(i2 << s->insn->data);
6114 }
6115 #define SPEC_in2_i2_16u_shl 0
6116
6117 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
6118 {
6119 uint64_t i2 = (uint32_t)get_field(s, i2);
6120 o->in2 = tcg_const_i64(i2 << s->insn->data);
6121 }
6122 #define SPEC_in2_i2_32u_shl 0
6123
6124 #ifndef CONFIG_USER_ONLY
6125 static void in2_insn(DisasContext *s, DisasOps *o)
6126 {
6127 o->in2 = tcg_const_i64(s->fields.raw_insn);
6128 }
6129 #define SPEC_in2_insn 0
6130 #endif
6131
6132 /* ====================================================================== */
6133
6134 /* Find opc within the table of insns. This is formulated as a switch
6135 statement so that (1) we get compile-time notice of cut-paste errors
6136 for duplicated opcodes, and (2) the compiler generates the binary
6137 search tree, rather than us having to post-process the table. */
6138
6139 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6140 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6141
6142 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6143 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6144
6145 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6146 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6147
6148 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6149
6150 enum DisasInsnEnum {
6151 #include "insn-data.def"
6152 };
6153
6154 #undef E
6155 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6156 .opc = OPC, \
6157 .flags = FL, \
6158 .fmt = FMT_##FT, \
6159 .fac = FAC_##FC, \
6160 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6161 .name = #NM, \
6162 .help_in1 = in1_##I1, \
6163 .help_in2 = in2_##I2, \
6164 .help_prep = prep_##P, \
6165 .help_wout = wout_##W, \
6166 .help_cout = cout_##CC, \
6167 .help_op = op_##OP, \
6168 .data = D \
6169 },
6170
6171 /* Allow 0 to be used for NULL in the table below. */
6172 #define in1_0 NULL
6173 #define in2_0 NULL
6174 #define prep_0 NULL
6175 #define wout_0 NULL
6176 #define cout_0 NULL
6177 #define op_0 NULL
6178
6179 #define SPEC_in1_0 0
6180 #define SPEC_in2_0 0
6181 #define SPEC_prep_0 0
6182 #define SPEC_wout_0 0
6183
6184 /* Give smaller names to the various facilities. */
6185 #define FAC_Z S390_FEAT_ZARCH
6186 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6187 #define FAC_DFP S390_FEAT_DFP
6188 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6189 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6190 #define FAC_EE S390_FEAT_EXECUTE_EXT
6191 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6192 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6193 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6194 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6195 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6196 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6197 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6198 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6199 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6200 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6201 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6202 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6203 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6204 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6205 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6206 #define FAC_SFLE S390_FEAT_STFLE
6207 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6208 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6209 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6210 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6211 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6212 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6213 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6214 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6215 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6216 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6217 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6218 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6219 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6220 #define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6221 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6222 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6223 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6224 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6225 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6226 #define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6227 #define FAC_MIE3 S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6228
6229 static const DisasInsn insn_info[] = {
6230 #include "insn-data.def"
6231 };
6232
6233 #undef E
6234 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6235 case OPC: return &insn_info[insn_ ## NM];
6236
/*
 * Map a 16-bit major<<8|minor opcode to its DisasInsn descriptor, or
 * NULL for an unknown opcode.  The switch body is generated from
 * insn-data.def via the E() macro redefined just above.
 */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}
6245
6246 #undef F
6247 #undef E
6248 #undef D
6249 #undef C
6250
6251 /* Extract a field from the insn. The INSN should be left-aligned in
6252 the uint64_t so that we can more easily utilize the big-bit-endian
6253 definitions we extract from the Principals of Operation. */
6254
/*
 * Extract one operand field F from the left-aligned instruction word
 * INSN into the compressed field array of O, applying the field's
 * sign-extension / reassembly rule.
 */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        /* Classic sign-extension trick: xor with the sign bit, then
           subtract it back out. */
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* dh is the low byte of R and holds the sign; shift it above
           the 12-bit dl portion. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        /* Vector registers: the 5th register-number bit lives in the
           RXB byte; which RXB bit depends on the field position. */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6311
6312 /* Lookup the insn at the current PC, extracting the operands into O and
6313 returning the info struct for the insn. Returns NULL for invalid insn. */
6314
/* Lookup the insn at the current PC, extracting the operands into S and
   returning the info struct for the insn.  Returns NULL for invalid insn.
   Also sets s->ilen and s->pc_tmp (address of the following insn). */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths. */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE. */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        /* Read the first halfword; the top two bits of the major opcode
           determine the total instruction length (2, 4 or 6 bytes). */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        /* Left-align the instruction in the 64-bit word. */
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode. Which we can't do without locating the
       secondary opcode. Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6415
/* Return true if REG is an additional-floating-point register, i.e.
   anything other than FPRs 0, 2, 4 and 6 (the base set). */
static bool is_afp_reg(int reg)
{
    return (reg & 1) || reg > 6;
}
6420
/* Return true if REG can start a 128-bit FP register pair.
   Valid pair leaders are 0,1,4,5,8,9,12,13 -- bit 1 of the number clear. */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6426
/*
 * Decode and translate the single instruction at s->base.pc_next,
 * emitting TCG code for it.  Returns the DisasJumpType telling the
 * translator loop how (or whether) to continue the TB.  On exit
 * s->base.pc_next has been advanced past the instruction.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN. */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    /* Notify the PER (program-event recording) helper of the fetch. */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* dxc: data-exception code to raise; 0 means allowed. */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction: load inputs, prepare output, emit the
       operation, then write back results and compute the CC. */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers; g_* marks operands
       that alias global registers and must not be freed. */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_PC_STALE;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

out:
    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6587
/* Translator hook: per-TB initialization of the DisasContext. */
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        /* Truncate the start address to 31 bits outside 64-bit mode. */
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    /* A non-zero cs_base carries the insn saved by EXECUTE (see
       extract_insn). */
    dc->ex_value = dc->base.tb->cs_base;
}
6601
/* Translator hook: no per-TB prologue is needed for s390x. */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6605
/* Translator hook: emit the insn_start op for the next instruction. */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    /* Remember the op so translate_one() can patch in the real ILEN
       (param index 2) once the insn has been decoded. */
    dc->insn_start = tcg_last_op();
}
6614
6615 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6616 {
6617 CPUS390XState *env = cs->env_ptr;
6618 DisasContext *dc = container_of(dcbase, DisasContext, base);
6619
6620 dc->base.is_jmp = translate_one(env, dc);
6621 if (dc->base.is_jmp == DISAS_NEXT) {
6622 uint64_t page_start;
6623
6624 page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6625 if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6626 dc->base.is_jmp = DISAS_TOO_MANY;
6627 }
6628 }
6629 }
6630
/* Translator hook: emit the TB epilogue appropriate to how the TB ended.
   The cases deliberately fall through: each later case needs a subset of
   the work of the earlier ones. */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        /* The exit has already been emitted. */
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if ((dc->base.tb->flags & FLAG_MASK_PER) ||
            dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            /* PER or NOCHAIN: full exit, no chaining to the next TB. */
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6662
6663 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6664 CPUState *cs, FILE *logfile)
6665 {
6666 DisasContext *dc = container_of(dcbase, DisasContext, base);
6667
6668 if (unlikely(dc->ex_value)) {
6669 /* ??? Unfortunately target_disas can't use host memory. */
6670 fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6671 } else {
6672 fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6673 target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6674 }
6675 }
6676
/* Hook table wiring the generic translator loop to the s390x frontend. */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6685
/* Target entry point: translate TB into TCG ops via the generic loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}
6692
6693 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6694 target_ulong *data)
6695 {
6696 int cc_op = data[1];
6697
6698 env->psw.addr = data[0];
6699
6700 /* Update the CC opcode if it is not already up-to-date. */
6701 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6702 env->cc_op = cc_op;
6703 }
6704
6705 /* Record ILEN. */
6706 env->int_pgm_ilen = data[2];
6707 }