/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields. We'll store each inside
 * an array indexed by an enum. In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact. For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

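/*
 * Compute the address part of the link information saved by branch-and-link
 * style instructions: the complete address in 64-bit mode, otherwise the
 * address (with bit 32 set in 31-bit mode) deposited into the low 32 bits
 * of OUT, leaving the high bits intact.
 */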
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(tcg_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] holds the lowest 8 bytes and vregs[n][1] the highest 8
     * bytes of the 16-byte vector, on both little- and big-endian hosts.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16-byte elements, the two 8-byte halves do not form a host int128
     * on a little-endian host, since they are in the wrong order. Some
     * operations (e.g. xor) do not care. For operations like addition, the
     * two 8-byte elements have to be loaded separately. Let's force all
     * 16-byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

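/* Record the breaking-event address and, when PER is enabled, let the
   helper check whether this branch raises a PER branch event. */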
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(tcg_env, gbea, next_pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(tcg_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        g_assert_not_reached();
    }
#endif
}

static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}

static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was. */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

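/* Add an immediate to SRC, wrapping the result to the current addressing
   mode (24, 31 or 64 bit). */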
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed. If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

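/* Return true if cc_src/cc_dst/cc_vr may hold live data for the current
   cc_op. The constant, static and dynamic cc ops keep no state there. */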
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

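/* PER requires a helper call for every branch, which rules out goto_tb. */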
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC. We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

/* ====================================================================== */
/* Define the insn format enumeration. */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

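/*
 * Operand field descriptors: beginning bit, size in bits, extraction type
 * (0: unsigned, 1: signed, 2: 20-bit long displacement, 3: vector register
 * number with the MSB taken from the RXB field), plus the compact and
 * original field indices.
 */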
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                         { { } },
#define F1(N, X1)                     { { X1 } },
#define F2(N, X1, X2)                 { { X1, X2 } },
#define F3(N, X1, X2, X3)             { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)         { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5)     { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back. See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated. To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0. To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>. */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB. */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB. */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values. */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP. */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself. */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */

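/*
 * Generate an unconditional branch to a direct target, chaining via
 * goto_tb when possible and otherwise updating psw_addr and exiting the TB.
 */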
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

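/*
 * Generate a conditional branch described by C to either the PC-relative
 * immediate target IMM or the computed target CDEST, using goto_tb
 * chaining for whichever exits allow it.
 */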
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next. */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch. */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken. */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond. This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb. Just update the PC and exit. */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}

/* ====================================================================== */
/* The operations. These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized. */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}

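/*
 * ADD (LOGICAL) WITH SIGNED IMMEDIATE to storage: performed atomically in
 * memory when the interlocked-access facility (STFLE bit 45) is available,
 * otherwise as a plain read-modify-write sequence.
 */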
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

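/*
 * AND to storage (NI and friends): performed atomically in memory when the
 * interlocked-access facility 2 is available, otherwise as a plain
 * read-modify-write sequence.
 */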
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

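/*
 * Build the 24-bit-mode link information for BAL: ILC in bits 30-31,
 * condition code in bits 28-29, program mask in bits 24-27, and the 24-bit
 * return address below. The other addressing modes use the standard link
 * information instead.
 */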
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/*
 * Disassemble the target of a branch. The results are returned in a form
 * suitable for passing into help_branch():
 *
 * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
 *   branches, whose DisasContext *S contains the relative immediate field RI,
 *   are considered fixed. All the other branches are considered computed.
 * - int IMM is the value of RI.
 * - TCGv_i64 CDEST is the address of the computed target.
 */
#define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
    if (have_field(s, ri)) {                                                   \
        if (unlikely(s->ex_value)) {                                           \
            cdest = tcg_temp_new_i64();                                        \
            tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
            tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
            is_imm = false;                                                    \
        } else {                                                               \
            is_imm = true;                                                     \
        }                                                                      \
    } else {                                                                   \
        is_imm = false;                                                        \
    }                                                                          \
    imm = is_imm ? get_field(s, ri) : 0;                                       \
} while (false)

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

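/* The BFP compare helpers compute the condition code directly. */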
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}

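/*
 * Pack the m3 (rounding mode) and m4 fields of a floating-point instruction
 * into a single constant, zeroing fields that predate the floating-point
 * extension facility and rejecting invalid rounding modes.
 */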
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}

static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

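/* CHECKSUM: the helper returns the checksum and the number of bytes
   processed, which advances the R2 address and decrements the R2+1 length. */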
1991 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1992 {
1993 int r2 = get_field(s, r2);
1994 TCGv_i128 pair = tcg_temp_new_i128();
1995 TCGv_i64 len = tcg_temp_new_i64();
1996
1997 gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
1998 set_cc_static(s);
1999 tcg_gen_extr_i128_i64(o->out, len, pair);
2000
2001 tcg_gen_add_i64(regs[r2], regs[r2], len);
2002 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2003
2004 return DISAS_NEXT;
2005 }
2006
2007 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2008 {
2009 int l = get_field(s, l1);
2010 TCGv_i64 src;
2011 TCGv_i32 vl;
2012 MemOp mop;
2013
2014 switch (l + 1) {
2015 case 1:
2016 case 2:
2017 case 4:
2018 case 8:
2019 mop = ctz32(l + 1) | MO_TE;
2020 /* Do not update cc_src yet: loading cc_dst may cause an exception. */
2021 src = tcg_temp_new_i64();
2022 tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
2023 tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
2024 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
2025 return DISAS_NEXT;
2026 default:
2027 vl = tcg_constant_i32(l);
2028 gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
2029 set_cc_static(s);
2030 return DISAS_NEXT;
2031 }
2032 }
2033
2034 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2035 {
2036 int r1 = get_field(s, r1);
2037 int r2 = get_field(s, r2);
2038 TCGv_i32 t1, t2;
2039
2040 /* r1 and r2 must be even. */
2041 if (r1 & 1 || r2 & 1) {
2042 gen_program_exception(s, PGM_SPECIFICATION);
2043 return DISAS_NORETURN;
2044 }
2045
2046 t1 = tcg_constant_i32(r1);
2047 t2 = tcg_constant_i32(r2);
2048 gen_helper_clcl(cc_op, tcg_env, t1, t2);
2049 set_cc_static(s);
2050 return DISAS_NEXT;
2051 }
2052
2053 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2054 {
2055 int r1 = get_field(s, r1);
2056 int r3 = get_field(s, r3);
2057 TCGv_i32 t1, t3;
2058
2059 /* r1 and r3 must be even. */
2060 if (r1 & 1 || r3 & 1) {
2061 gen_program_exception(s, PGM_SPECIFICATION);
2062 return DISAS_NORETURN;
2063 }
2064
2065 t1 = tcg_constant_i32(r1);
2066 t3 = tcg_constant_i32(r3);
2067 gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
2068 set_cc_static(s);
2069 return DISAS_NEXT;
2070 }
2071
2072 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2073 {
2074 int r1 = get_field(s, r1);
2075 int r3 = get_field(s, r3);
2076 TCGv_i32 t1, t3;
2077
2078 /* r1 and r3 must be even. */
2079 if (r1 & 1 || r3 & 1) {
2080 gen_program_exception(s, PGM_SPECIFICATION);
2081 return DISAS_NORETURN;
2082 }
2083
2084 t1 = tcg_constant_i32(r1);
2085 t3 = tcg_constant_i32(r3);
2086 gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
2087 set_cc_static(s);
2088 return DISAS_NEXT;
2089 }
2090
2091 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2092 {
2093 TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2094 TCGv_i32 t1 = tcg_temp_new_i32();
2095
2096 tcg_gen_extrl_i64_i32(t1, o->in1);
2097 gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
2098 set_cc_static(s);
2099 return DISAS_NEXT;
2100 }
2101
2102 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2103 {
2104 TCGv_i128 pair = tcg_temp_new_i128();
2105
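/* The helper returns the two updated operand addresses as an i128 pair,
   unpacked here so they can be written back to R1 and R2. */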
2106 gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
2107 tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2108
2109 set_cc_static(s);
2110 return DISAS_NEXT;
2111 }
2112
2113 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2114 {
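/* COPY SIGN: combine the sign bit of the first source with the
   magnitude of the second. */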
2115 TCGv_i64 t = tcg_temp_new_i64();
2116 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2117 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2118 tcg_gen_or_i64(o->out, o->out, t);
2119 return DISAS_NEXT;
2120 }
2121
2122 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2123 {
2124 int d2 = get_field(s, d2);
2125 int b2 = get_field(s, b2);
2126 TCGv_i64 addr, cc;
2127
2128 /* Note that in1 = R3 (new value) and
2129 in2 = (zero-extended) R1 (expected value). */
2130
2131 addr = get_address(s, 0, b2, d2);
2132 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2133 get_mem_index(s), s->insn->data | MO_ALIGN);
2134
2135 /* Are the memory and expected values (un)equal? Note that this setcond
2136 produces the output CC value, thus the NE sense of the test. */
2137 cc = tcg_temp_new_i64();
2138 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2139 tcg_gen_extrl_i64_i32(cc_op, cc);
2140 set_cc_static(s);
2141
2142 return DISAS_NEXT;
2143 }
2144
2145 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2146 {
2147 int r1 = get_field(s, r1);
2148
2149 o->out_128 = tcg_temp_new_i128();
2150 tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);
2151
2152 /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value. */
2153 tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
2154 get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);
2155
2156 /*
2157 * Extract result into cc_dst:cc_src, compare vs the expected value
2158 * in the as yet unmodified input registers, then update CC_OP.
2159 */
2160 tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
2161 tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
2162 tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
2163 tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
2164 set_cc_nz_u64(s, cc_dst);
2165
2166 return DISAS_NEXT;
2167 }
2168
2169 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2170 {
2171 int r3 = get_field(s, r3);
2172 TCGv_i32 t_r3 = tcg_constant_i32(r3);
2173
2174 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2175 gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
2176 } else {
2177 gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
2178 }
2179
2180 set_cc_static(s);
2181 return DISAS_NEXT;
2182 }
2183
2184 #ifndef CONFIG_USER_ONLY
2185 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2186 {
2187 MemOp mop = s->insn->data;
2188 TCGv_i64 addr, old, cc;
2189 TCGLabel *lab = gen_new_label();
2190
2191 /* Note that in1 = R1 (zero-extended expected value),
2192 out = R1 (original reg), out2 = R1+1 (new value). */
2193
2194 addr = tcg_temp_new_i64();
2195 old = tcg_temp_new_i64();
2196 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2197 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2198 get_mem_index(s), mop | MO_ALIGN);
2199
2200 /* Are the memory and expected values (un)equal? */
2201 cc = tcg_temp_new_i64();
2202 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2203 tcg_gen_extrl_i64_i32(cc_op, cc);
2204
2205 /* Write back the output now, so that it happens before the
2206 following branch, so that we don't need local temps. */
2207 if ((mop & MO_SIZE) == MO_32) {
2208 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2209 } else {
2210 tcg_gen_mov_i64(o->out, old);
2211 }
2212
2213 /* If the comparison was equal, and the LSB of R2 was set,
2214 then we need to flush the TLB (for all cpus). */
2215 tcg_gen_xori_i64(cc, cc, 1);
2216 tcg_gen_and_i64(cc, cc, o->in2);
2217 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2218
2219 gen_helper_purge(tcg_env);
2220 gen_set_label(lab);
2221
2222 return DISAS_NEXT;
2223 }
2224 #endif
2225
2226 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2227 {
2228 TCGv_i64 t1 = tcg_temp_new_i64();
2229 TCGv_i32 t2 = tcg_temp_new_i32();
2230 tcg_gen_extrl_i64_i32(t2, o->in1);
2231 gen_helper_cvd(t1, t2);
2232 tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2233 return DISAS_NEXT;
2234 }
2235
2236 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2237 {
2238 int m3 = get_field(s, m3);
2239 TCGLabel *lab = gen_new_label();
2240 TCGCond c;
2241
2242 c = tcg_invert_cond(ltgt_cond[m3]);
2243 if (s->insn->data) {
2244 c = tcg_unsigned_cond(c);
2245 }
2246 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2247
2248 /* Trap. */
2249 gen_trap(s);
2250
2251 gen_set_label(lab);
2252 return DISAS_NEXT;
2253 }
2254
2255 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2256 {
2257 int m3 = get_field(s, m3);
2258 int r1 = get_field(s, r1);
2259 int r2 = get_field(s, r2);
2260 TCGv_i32 tr1, tr2, chk;
2261
2262 /* R1 and R2 must both be even. */
2263 if ((r1 | r2) & 1) {
2264 gen_program_exception(s, PGM_SPECIFICATION);
2265 return DISAS_NORETURN;
2266 }
2267 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2268 m3 = 0;
2269 }
2270
2271 tr1 = tcg_constant_i32(r1);
2272 tr2 = tcg_constant_i32(r2);
2273 chk = tcg_constant_i32(m3);
2274
2275 switch (s->insn->data) {
2276 case 12:
2277 gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
2278 break;
2279 case 14:
2280 gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
2281 break;
2282 case 21:
2283 gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
2284 break;
2285 case 24:
2286 gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
2287 break;
2288 case 41:
2289 gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
2290 break;
2291 case 42:
2292 gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
2293 break;
2294 default:
2295 g_assert_not_reached();
2296 }
2297
2298 set_cc_static(s);
2299 return DISAS_NEXT;
2300 }
2301
2302 #ifndef CONFIG_USER_ONLY
2303 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2304 {
2305 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2306 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2307 TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2308
2309 gen_helper_diag(tcg_env, r1, r3, func_code);
2310 return DISAS_NEXT;
2311 }
2312 #endif
2313
2314 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2315 {
2316 gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
2317 tcg_gen_extr32_i64(o->out2, o->out, o->out);
2318 return DISAS_NEXT;
2319 }
2320
2321 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2322 {
2323 gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
2324 tcg_gen_extr32_i64(o->out2, o->out, o->out);
2325 return DISAS_NEXT;
2326 }
2327
2328 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2329 {
2330 TCGv_i128 t = tcg_temp_new_i128();
2331
2332 gen_helper_divs64(t, tcg_env, o->in1, o->in2);
2333 tcg_gen_extr_i128_i64(o->out2, o->out, t);
2334 return DISAS_NEXT;
2335 }
2336
2337 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2338 {
2339 TCGv_i128 t = tcg_temp_new_i128();
2340
2341 gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
2342 tcg_gen_extr_i128_i64(o->out2, o->out, t);
2343 return DISAS_NEXT;
2344 }
2345
2346 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2347 {
2348 gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
2349 return DISAS_NEXT;
2350 }
2351
2352 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2353 {
2354 gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
2355 return DISAS_NEXT;
2356 }
2357
2358 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2359 {
2360 gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
2361 return DISAS_NEXT;
2362 }
2363
2364 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2365 {
2366 int r2 = get_field(s, r2);
2367 tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
2368 return DISAS_NEXT;
2369 }
2370
2371 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2372 {
2373 /* No cache information provided. */
2374 tcg_gen_movi_i64(o->out, -1);
2375 return DISAS_NEXT;
2376 }
2377
2378 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2379 {
2380 tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
2381 return DISAS_NEXT;
2382 }
2383
2384 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2385 {
2386 int r1 = get_field(s, r1);
2387 int r2 = get_field(s, r2);
2388 TCGv_i64 t = tcg_temp_new_i64();
2389 TCGv_i64 t_cc = tcg_temp_new_i64();
2390
2391 /* Note the "subsequently" in the PoO, which implies a defined result
2392 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2393 gen_op_calc_cc(s);
2394 tcg_gen_extu_i32_i64(t_cc, cc_op);
2395 tcg_gen_shri_i64(t, psw_mask, 32);
2396 tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
2397 store_reg32_i64(r1, t);
2398 if (r2 != 0) {
2399 store_reg32_i64(r2, psw_mask);
2400 }
2401 return DISAS_NEXT;
2402 }
2403
2404 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2405 {
2406 int r1 = get_field(s, r1);
2407 TCGv_i32 ilen;
2408 TCGv_i64 v1;
2409
2410 /* Nested EXECUTE is not allowed. */
2411 if (unlikely(s->ex_value)) {
2412 gen_program_exception(s, PGM_EXECUTE);
2413 return DISAS_NORETURN;
2414 }
2415
2416 update_psw_addr(s);
2417 update_cc_op(s);
2418
2419 if (r1 == 0) {
2420 v1 = tcg_constant_i64(0);
2421 } else {
2422 v1 = regs[r1];
2423 }
2424
2425 ilen = tcg_constant_i32(s->ilen);
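/* The helper ORs the low byte of R1 into the second byte of the target
   instruction and leaves the result in env->ex_value, from which the
   next translation round executes it. */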
2426 gen_helper_ex(tcg_env, ilen, v1, o->in2);
2427
2428 return DISAS_PC_CC_UPDATED;
2429 }
2430
2431 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2432 {
2433 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2434
2435 if (!m34) {
2436 return DISAS_NORETURN;
2437 }
2438 gen_helper_fieb(o->out, tcg_env, o->in2, m34);
2439 return DISAS_NEXT;
2440 }
2441
2442 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2443 {
2444 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2445
2446 if (!m34) {
2447 return DISAS_NORETURN;
2448 }
2449 gen_helper_fidb(o->out, tcg_env, o->in2, m34);
2450 return DISAS_NEXT;
2451 }
2452
2453 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2454 {
2455 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2456
2457 if (!m34) {
2458 return DISAS_NORETURN;
2459 }
2460 gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
2461 return DISAS_NEXT;
2462 }
2463
2464 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2465 {
2466 /* We'll use the original input for cc computation, since we get to
2467 compare that against 0, which ought to be better than comparing
2468 the real output against 64. It also lets cc_dst be a convenient
2469 temporary during our computation. */
2470 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2471
2472 /* R1 = IN ? CLZ(IN) : 64. */
2473 tcg_gen_clzi_i64(o->out, o->in2, 64);
2474
2475 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2476 value by 64, which is undefined. But since the shift is 64 iff the
2477 input is zero, we still get the correct result after and'ing. */
2478 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2479 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2480 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2481 return DISAS_NEXT;
2482 }
2483
2484 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2485 {
2486 int m3 = get_field(s, m3);
2487 int pos, len, base = s->insn->data;
2488 TCGv_i64 tmp = tcg_temp_new_i64();
2489 uint64_t ccm;
2490
2491 switch (m3) {
2492 case 0xf:
2493 /* Effectively a 32-bit load. */
2494 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
2495 len = 32;
2496 goto one_insert;
2497
2498 case 0xc:
2499 case 0x6:
2500 case 0x3:
2501 /* Effectively a 16-bit load. */
2502 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
2503 len = 16;
2504 goto one_insert;
2505
2506 case 0x8:
2507 case 0x4:
2508 case 0x2:
2509 case 0x1:
2510 /* Effectively an 8-bit load. */
2511 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2512 len = 8;
2513 goto one_insert;
2514
2515 one_insert:
2516 pos = base + ctz32(m3) * 8;
2517 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2518 ccm = ((1ull << len) - 1) << pos;
2519 break;
2520
2521 case 0:
2522 /* Recognize access exceptions for the first byte. */
2523 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2524 gen_op_movi_cc(s, 0);
2525 return DISAS_NEXT;
2526
2527 default:
2528 /* This is going to be a sequence of loads and inserts. */
2529 pos = base + 32 - 8;
2530 ccm = 0;
2531 while (m3) {
2532 if (m3 & 0x8) {
2533 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2534 tcg_gen_addi_i64(o->in2, o->in2, 1);
2535 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2536 ccm |= 0xffull << pos;
2537 }
2538 m3 = (m3 << 1) & 0xf;
2539 pos -= 8;
2540 }
2541 break;
2542 }
2543
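/* Compute CC from only the inserted bytes: ccm masks them out of the
   updated R1 value. */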
2544 tcg_gen_movi_i64(tmp, ccm);
2545 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2546 return DISAS_NEXT;
2547 }
2548
2549 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2550 {
2551 int shift = s->insn->data & 0xff;
2552 int size = s->insn->data >> 8;
2553 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2554 return DISAS_NEXT;
2555 }
2556
2557 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2558 {
2559 TCGv_i64 t1, t2;
2560
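/* INSERT PROGRAM MASK: bits 32-33 of R1 are cleared, CC goes to bits
   34-35 and the program mask to bits 36-39; hence the single 8-bit
   deposit at position 24 below. */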
2561 gen_op_calc_cc(s);
2562 t1 = tcg_temp_new_i64();
2563 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2564 t2 = tcg_temp_new_i64();
2565 tcg_gen_extu_i32_i64(t2, cc_op);
2566 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2567 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2568 return DISAS_NEXT;
2569 }
2570
2571 #ifndef CONFIG_USER_ONLY
2572 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2573 {
2574 TCGv_i32 m4;
2575
2576 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2577 m4 = tcg_constant_i32(get_field(s, m4));
2578 } else {
2579 m4 = tcg_constant_i32(0);
2580 }
2581 gen_helper_idte(tcg_env, o->in1, o->in2, m4);
2582 return DISAS_NEXT;
2583 }
2584
2585 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2586 {
2587 TCGv_i32 m4;
2588
2589 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2590 m4 = tcg_constant_i32(get_field(s, m4));
2591 } else {
2592 m4 = tcg_constant_i32(0);
2593 }
2594 gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
2595 return DISAS_NEXT;
2596 }
2597
2598 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2599 {
2600 gen_helper_iske(o->out, tcg_env, o->in2);
2601 return DISAS_NEXT;
2602 }
2603 #endif
2604
2605 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2606 {
2607 int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2608 int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2609 int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2610 TCGv_i32 t_r1, t_r2, t_r3, type;
2611
2612 switch (s->insn->data) {
2613 case S390_FEAT_TYPE_KMA:
2614 if (r3 == r1 || r3 == r2) {
2615 gen_program_exception(s, PGM_SPECIFICATION);
2616 return DISAS_NORETURN;
2617 }
2618 /* FALL THROUGH */
2619 case S390_FEAT_TYPE_KMCTR:
2620 if (r3 & 1 || !r3) {
2621 gen_program_exception(s, PGM_SPECIFICATION);
2622 return DISAS_NORETURN;
2623 }
2624 /* FALL THROUGH */
2625 case S390_FEAT_TYPE_PPNO:
2626 case S390_FEAT_TYPE_KMF:
2627 case S390_FEAT_TYPE_KMC:
2628 case S390_FEAT_TYPE_KMO:
2629 case S390_FEAT_TYPE_KM:
2630 if (r1 & 1 || !r1) {
2631 gen_program_exception(s, PGM_SPECIFICATION);
2632 return DISAS_NORETURN;
2633 }
2634 /* FALL THROUGH */
2635 case S390_FEAT_TYPE_KMAC:
2636 case S390_FEAT_TYPE_KIMD:
2637 case S390_FEAT_TYPE_KLMD:
2638 if (r2 & 1 || !r2) {
2639 gen_program_exception(s, PGM_SPECIFICATION);
2640 return DISAS_NORETURN;
2641 }
2642 /* FALL THROUGH */
2643 case S390_FEAT_TYPE_PCKMO:
2644 case S390_FEAT_TYPE_PCC:
2645 break;
2646 default:
2647 g_assert_not_reached();
2648 }
2649
2650 t_r1 = tcg_constant_i32(r1);
2651 t_r2 = tcg_constant_i32(r2);
2652 t_r3 = tcg_constant_i32(r3);
2653 type = tcg_constant_i32(s->insn->data);
2654 gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
2655 set_cc_static(s);
2656 return DISAS_NEXT;
2657 }
2658
2659 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2660 {
2661 gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
2662 set_cc_static(s);
2663 return DISAS_NEXT;
2664 }
2665
2666 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2667 {
2668 gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
2669 set_cc_static(s);
2670 return DISAS_NEXT;
2671 }
2672
2673 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2674 {
2675 gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
2676 set_cc_static(s);
2677 return DISAS_NEXT;
2678 }
2679
2680 static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
2681 {
2682 /* The real output is indeed the original value in memory,
2683 as returned by the atomic fetch-and-add. */
2684 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2685 s->insn->data | MO_ALIGN);
2686 /* However, we need to recompute the addition for setting CC. */
2687 if (addu64) {
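/* Compute the 128-bit sum so that the carry-out lands in cc_src, which
   the CC_OP_ADDU computation consumes. This is the cc_src update that
   LAALG was previously missing. */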
2688 tcg_gen_movi_i64(cc_src, 0);
2689 tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
2690 } else {
2691 tcg_gen_add_i64(o->out, o->in1, o->in2);
2692 }
2693 return DISAS_NEXT;
2694 }
2695
2696 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2697 {
2698 return help_laa(s, o, false);
2699 }
2700
2701 static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
2702 {
2703 return help_laa(s, o, true);
2704 }
2705
2706 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2707 {
2708 /* The real output is indeed the original value in memory,
2709 as returned by the atomic operation. */
2710 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2711 s->insn->data | MO_ALIGN);
2712 /* However, we need to recompute the operation for setting CC. */
2713 tcg_gen_and_i64(o->out, o->in1, o->in2);
2714 return DISAS_NEXT;
2715 }
2716
2717 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2718 {
2719 /* The real output is indeed the original value in memory,
2720 as returned by the atomic operation. */
2721 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2722 s->insn->data | MO_ALIGN);
2723 /* However, we need to recompute the operation for setting CC. */
2724 tcg_gen_or_i64(o->out, o->in1, o->in2);
2725 return DISAS_NEXT;
2726 }
2727
2728 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2729 {
2730 /* The real output is indeed the original value in memory,
2731 as returned by the atomic operation. */
2732 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2733 s->insn->data | MO_ALIGN);
2734 /* However, we need to recompute the operation for setting CC. */
2735 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2736 return DISAS_NEXT;
2737 }
2738
2739 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2740 {
2741 gen_helper_ldeb(o->out, tcg_env, o->in2);
2742 return DISAS_NEXT;
2743 }
2744
2745 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2746 {
2747 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2748
2749 if (!m34) {
2750 return DISAS_NORETURN;
2751 }
2752 gen_helper_ledb(o->out, tcg_env, o->in2, m34);
2753 return DISAS_NEXT;
2754 }
2755
2756 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2757 {
2758 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2759
2760 if (!m34) {
2761 return DISAS_NORETURN;
2762 }
2763 gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
2764 return DISAS_NEXT;
2765 }
2766
2767 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2768 {
2769 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2770
2771 if (!m34) {
2772 return DISAS_NORETURN;
2773 }
2774 gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
2775 return DISAS_NEXT;
2776 }
2777
2778 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2779 {
2780 gen_helper_lxdb(o->out_128, tcg_env, o->in2);
2781 return DISAS_NEXT;
2782 }
2783
2784 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2785 {
2786 gen_helper_lxeb(o->out_128, tcg_env, o->in2);
2787 return DISAS_NEXT;
2788 }
2789
2790 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2791 {
2792 tcg_gen_shli_i64(o->out, o->in2, 32);
2793 return DISAS_NEXT;
2794 }
2795
2796 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2797 {
2798 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2799 return DISAS_NEXT;
2800 }
2801
2802 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2803 {
2804 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
2805 return DISAS_NEXT;
2806 }
2807
2808 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2809 {
2810 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
2811 return DISAS_NEXT;
2812 }
2813
2814 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2815 {
2816 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
2817 return DISAS_NEXT;
2818 }
2819
2820 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2821 {
2822 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
2823 return DISAS_NEXT;
2824 }
2825
2826 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2827 {
2828 tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2829 MO_TESL | s->insn->data);
2830 return DISAS_NEXT;
2831 }
2832
2833 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2834 {
2835 tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2836 MO_TEUL | s->insn->data);
2837 return DISAS_NEXT;
2838 }
2839
2840 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2841 {
2842 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
2843 MO_TEUQ | s->insn->data);
2844 return DISAS_NEXT;
2845 }
2846
2847 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2848 {
2849 TCGLabel *lab = gen_new_label();
2850 store_reg32_i64(get_field(s, r1), o->in2);
2851 /* The value is stored even in case of trap. */
2852 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2853 gen_trap(s);
2854 gen_set_label(lab);
2855 return DISAS_NEXT;
2856 }
2857
2858 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2859 {
2860 TCGLabel *lab = gen_new_label();
2861 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
2862 /* The value is stored even in case of trap. */
2863 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2864 gen_trap(s);
2865 gen_set_label(lab);
2866 return DISAS_NEXT;
2867 }
2868
2869 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2870 {
2871 TCGLabel *lab = gen_new_label();
2872 store_reg32h_i64(get_field(s, r1), o->in2);
2873 /* The value is stored even in case of trap. */
2874 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2875 gen_trap(s);
2876 gen_set_label(lab);
2877 return DISAS_NEXT;
2878 }
2879
2880 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2881 {
2882 TCGLabel *lab = gen_new_label();
2883
2884 tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
2885 /* The value is stored even in case of trap. */
2886 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2887 gen_trap(s);
2888 gen_set_label(lab);
2889 return DISAS_NEXT;
2890 }
2891
2892 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2893 {
2894 TCGLabel *lab = gen_new_label();
2895 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2896 /* The value is stored even in case of trap. */
2897 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2898 gen_trap(s);
2899 gen_set_label(lab);
2900 return DISAS_NEXT;
2901 }
2902
2903 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2904 {
2905 DisasCompare c;
2906
2907 if (have_field(s, m3)) {
2908 /* LOAD * ON CONDITION */
2909 disas_jcc(s, &c, get_field(s, m3));
2910 } else {
2911 /* SELECT */
2912 disas_jcc(s, &c, get_field(s, m4));
2913 }
2914
2915 if (c.is_64) {
2916 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2917 o->in2, o->in1);
2918 } else {
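/* Widen the 32-bit comparison to a 0/1 value so a single 64-bit
   movcond against zero can perform the select. */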
2919 TCGv_i32 t32 = tcg_temp_new_i32();
2920 TCGv_i64 t, z;
2921
2922 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2923
2924 t = tcg_temp_new_i64();
2925 tcg_gen_extu_i32_i64(t, t32);
2926
2927 z = tcg_constant_i64(0);
2928 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2929 }
2930
2931 return DISAS_NEXT;
2932 }
2933
2934 #ifndef CONFIG_USER_ONLY
2935 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2936 {
2937 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2938 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2939
2940 gen_helper_lctl(tcg_env, r1, o->in2, r3);
2941 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2942 s->exit_to_mainloop = true;
2943 return DISAS_TOO_MANY;
2944 }
2945
2946 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2947 {
2948 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2949 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2950
2951 gen_helper_lctlg(tcg_env, r1, o->in2, r3);
2952 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2953 s->exit_to_mainloop = true;
2954 return DISAS_TOO_MANY;
2955 }
2956
2957 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2958 {
2959 gen_helper_lra(o->out, tcg_env, o->out, o->in2);
2960 set_cc_static(s);
2961 return DISAS_NEXT;
2962 }
2963
2964 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2965 {
2966 tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
2967 return DISAS_NEXT;
2968 }
2969
2970 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2971 {
2972 TCGv_i64 mask, addr;
2973
2974 per_breaking_event(s);
2975
2976 /*
2977 * Convert the short PSW into the normal PSW, similar to what
2978 * s390_cpu_load_normal() does.
2979 */
2980 mask = tcg_temp_new_i64();
2981 addr = tcg_temp_new_i64();
2982 tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
2983 tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
2984 tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
2985 tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
2986 gen_helper_load_psw(tcg_env, mask, addr);
2987 return DISAS_NORETURN;
2988 }
2989
2990 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2991 {
2992 TCGv_i64 t1, t2;
2993
2994 per_breaking_event(s);
2995
2996 t1 = tcg_temp_new_i64();
2997 t2 = tcg_temp_new_i64();
2998 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2999 MO_TEUQ | MO_ALIGN_8);
3000 tcg_gen_addi_i64(o->in2, o->in2, 8);
3001 tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
3002 gen_helper_load_psw(tcg_env, t1, t2);
3003 return DISAS_NORETURN;
3004 }
3005 #endif
3006
3007 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3008 {
3009 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3010 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3011
3012 gen_helper_lam(tcg_env, r1, o->in2, r3);
3013 return DISAS_NEXT;
3014 }
3015
3016 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3017 {
3018 int r1 = get_field(s, r1);
3019 int r3 = get_field(s, r3);
3020 TCGv_i64 t1, t2;
3021
3022 /* Only one register to read. */
3023 t1 = tcg_temp_new_i64();
3024 if (unlikely(r1 == r3)) {
3025 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3026 store_reg32_i64(r1, t1);
3027 return DISAS_NEXT;
3028 }
3029
3030 /* First load the values of the first and last registers to trigger
3031 possible page faults. */
3032 t2 = tcg_temp_new_i64();
3033 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3034 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3035 tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3036 store_reg32_i64(r1, t1);
3037 store_reg32_i64(r3, t2);
3038
3039 /* Only two registers to read. */
3040 if (((r1 + 1) & 15) == r3) {
3041 return DISAS_NEXT;
3042 }
3043
3044 /* Then load the remaining registers. Page fault can't occur. */
3045 r3 = (r3 - 1) & 15;
3046 tcg_gen_movi_i64(t2, 4);
3047 while (r1 != r3) {
3048 r1 = (r1 + 1) & 15;
3049 tcg_gen_add_i64(o->in2, o->in2, t2);
3050 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3051 store_reg32_i64(r1, t1);
3052 }
3053 return DISAS_NEXT;
3054 }
3055
3056 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3057 {
3058 int r1 = get_field(s, r1);
3059 int r3 = get_field(s, r3);
3060 TCGv_i64 t1, t2;
3061
3062 /* Only one register to read. */
3063 t1 = tcg_temp_new_i64();
3064 if (unlikely(r1 == r3)) {
3065 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3066 store_reg32h_i64(r1, t1);
3067 return DISAS_NEXT;
3068 }
3069
3070 /* First load the values of the first and last registers to trigger
3071 possible page faults. */
3072 t2 = tcg_temp_new_i64();
3073 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3074 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3075 tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3076 store_reg32h_i64(r1, t1);
3077 store_reg32h_i64(r3, t2);
3078
3079 /* Only two registers to read. */
3080 if (((r1 + 1) & 15) == r3) {
3081 return DISAS_NEXT;
3082 }
3083
3084 /* Then load the remaining registers. Page fault can't occur. */
3085 r3 = (r3 - 1) & 15;
3086 tcg_gen_movi_i64(t2, 4);
3087 while (r1 != r3) {
3088 r1 = (r1 + 1) & 15;
3089 tcg_gen_add_i64(o->in2, o->in2, t2);
3090 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3091 store_reg32h_i64(r1, t1);
3092 }
3093 return DISAS_NEXT;
3094 }
3095
3096 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3097 {
3098 int r1 = get_field(s, r1);
3099 int r3 = get_field(s, r3);
3100 TCGv_i64 t1, t2;
3101
3102 /* Only one register to read. */
3103 if (unlikely(r1 == r3)) {
3104 tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3105 return DISAS_NEXT;
3106 }
3107
3108 /* First load the values of the first and last registers to trigger
3109 possible page faults. */
3110 t1 = tcg_temp_new_i64();
3111 t2 = tcg_temp_new_i64();
3112 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
3113 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3114 tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
3115 tcg_gen_mov_i64(regs[r1], t1);
3116
3117 /* Only two registers to read. */
3118 if (((r1 + 1) & 15) == r3) {
3119 return DISAS_NEXT;
3120 }
3121
3122 /* Then load the remaining registers. Page fault can't occur. */
3123 r3 = (r3 - 1) & 15;
3124 tcg_gen_movi_i64(t1, 8);
3125 while (r1 != r3) {
3126 r1 = (r1 + 1) & 15;
3127 tcg_gen_add_i64(o->in2, o->in2, t1);
3128 tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3129 }
3130 return DISAS_NEXT;
3131 }
3132
3133 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3134 {
3135 TCGv_i64 a1, a2;
3136 MemOp mop = s->insn->data;
3137
3138 /* In a parallel context, stop the world and single step. */
3139 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3140 update_psw_addr(s);
3141 update_cc_op(s);
3142 gen_exception(EXCP_ATOMIC);
3143 return DISAS_NORETURN;
3144 }
3145
3146 /* In a serial context, perform the two loads ... */
3147 a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3148 a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3149 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3150 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3151
3152 /* ... and indicate that we performed them while interlocked. */
3153 gen_op_movi_cc(s, 0);
3154 return DISAS_NEXT;
3155 }
3156
3157 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3158 {
3159 o->out_128 = tcg_temp_new_i128();
3160 tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
3161 MO_TE | MO_128 | MO_ALIGN);
3162 return DISAS_NEXT;
3163 }
3164
3165 #ifndef CONFIG_USER_ONLY
3166 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3167 {
3168 tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3169 return DISAS_NEXT;
3170 }
3171 #endif
3172
3173 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3174 {
3175 tcg_gen_andi_i64(o->out, o->in2, -256);
3176 return DISAS_NEXT;
3177 }
3178
3179 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3180 {
3181 const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3182
3183 if (get_field(s, m3) > 6) {
3184 gen_program_exception(s, PGM_SPECIFICATION);
3185 return DISAS_NORETURN;
3186 }
3187
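/* out = MIN(16, block_size - (addr % block_size)): OR-ing in
   -block_size keeps only the offset within the block (as a negative
   value), and negating that yields the distance to the boundary. */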
3188 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3189 tcg_gen_neg_i64(o->addr1, o->addr1);
3190 tcg_gen_movi_i64(o->out, 16);
3191 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3192 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3193 return DISAS_NEXT;
3194 }
3195
3196 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3197 {
3198 const uint8_t monitor_class = get_field(s, i2);
3199
3200 if (monitor_class & 0xf0) {
3201 gen_program_exception(s, PGM_SPECIFICATION);
3202 return DISAS_NORETURN;
3203 }
3204
3205 #if !defined(CONFIG_USER_ONLY)
3206 gen_helper_monitor_call(tcg_env, o->addr1,
3207 tcg_constant_i32(monitor_class));
3208 #endif
3209 /* Defaults to a NOP. */
3210 return DISAS_NEXT;
3211 }
3212
3213 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3214 {
3215 o->out = o->in2;
3216 o->in2 = NULL;
3217 return DISAS_NEXT;
3218 }
3219
3220 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3221 {
3222 int b2 = get_field(s, b2);
3223 TCGv ar1 = tcg_temp_new_i64();
3224
3225 o->out = o->in2;
3226 o->in2 = NULL;
3227
3228 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3229 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3230 tcg_gen_movi_i64(ar1, 0);
3231 break;
3232 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3233 tcg_gen_movi_i64(ar1, 1);
3234 break;
3235 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3236 if (b2) {
3237 tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
3238 } else {
3239 tcg_gen_movi_i64(ar1, 0);
3240 }
3241 break;
3242 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3243 tcg_gen_movi_i64(ar1, 2);
3244 break;
3245 }
3246
3247 tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[1]));
3248 return DISAS_NEXT;
3249 }
3250
3251 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3252 {
3253 o->out = o->in1;
3254 o->out2 = o->in2;
3255 o->in1 = NULL;
3256 o->in2 = NULL;
3257 return DISAS_NEXT;
3258 }
3259
3260 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3261 {
3262 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3263
3264 gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
3265 return DISAS_NEXT;
3266 }
3267
3268 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3269 {
3270 gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
3271 return DISAS_NEXT;
3272 }
3273
3274 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3275 {
3276 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3277
3278 gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
3279 return DISAS_NEXT;
3280 }
3281
3282 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3283 {
3284 int r1 = get_field(s, r1);
3285 int r2 = get_field(s, r2);
3286 TCGv_i32 t1, t2;
3287
3288 /* r1 and r2 must be even. */
3289 if (r1 & 1 || r2 & 1) {
3290 gen_program_exception(s, PGM_SPECIFICATION);
3291 return DISAS_NORETURN;
3292 }
3293
3294 t1 = tcg_constant_i32(r1);
3295 t2 = tcg_constant_i32(r2);
3296 gen_helper_mvcl(cc_op, tcg_env, t1, t2);
3297 set_cc_static(s);
3298 return DISAS_NEXT;
3299 }
3300
3301 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3302 {
3303 int r1 = get_field(s, r1);
3304 int r3 = get_field(s, r3);
3305 TCGv_i32 t1, t3;
3306
3307 /* r1 and r3 must be even. */
3308 if (r1 & 1 || r3 & 1) {
3309 gen_program_exception(s, PGM_SPECIFICATION);
3310 return DISAS_NORETURN;
3311 }
3312
3313 t1 = tcg_constant_i32(r1);
3314 t3 = tcg_constant_i32(r3);
3315 gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
3316 set_cc_static(s);
3317 return DISAS_NEXT;
3318 }
3319
3320 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3321 {
3322 int r1 = get_field(s, r1);
3323 int r3 = get_field(s, r3);
3324 TCGv_i32 t1, t3;
3325
3326 /* r1 and r3 must be even. */
3327 if (r1 & 1 || r3 & 1) {
3328 gen_program_exception(s, PGM_SPECIFICATION);
3329 return DISAS_NORETURN;
3330 }
3331
3332 t1 = tcg_constant_i32(r1);
3333 t3 = tcg_constant_i32(r3);
3334 gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
3335 set_cc_static(s);
3336 return DISAS_NEXT;
3337 }
3338
3339 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3340 {
3341 int r3 = get_field(s, r3);
3342 gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
3343 set_cc_static(s);
3344 return DISAS_NEXT;
3345 }
3346
3347 #ifndef CONFIG_USER_ONLY
3348 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3349 {
3350 int r1 = get_field(s, l1);
3351 int r3 = get_field(s, r3);
3352 gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
3353 set_cc_static(s);
3354 return DISAS_NEXT;
3355 }
3356
3357 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3358 {
3359 int r1 = get_field(s, l1);
3360 int r3 = get_field(s, r3);
3361 gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
3362 set_cc_static(s);
3363 return DISAS_NEXT;
3364 }
3365 #endif
3366
3367 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3368 {
3369 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3370
3371 gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
3372 return DISAS_NEXT;
3373 }
3374
3375 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3376 {
3377 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3378
3379 gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
3380 return DISAS_NEXT;
3381 }
3382
3383 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3384 {
3385 TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3386 TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3387
3388 gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
3389 set_cc_static(s);
3390 return DISAS_NEXT;
3391 }
3392
3393 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3394 {
3395 TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3396 TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3397
3398 gen_helper_mvst(cc_op, tcg_env, t1, t2);
3399 set_cc_static(s);
3400 return DISAS_NEXT;
3401 }
3402
3403 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3404 {
3405 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3406
3407 gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
3408 return DISAS_NEXT;
3409 }
3410
3411 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3412 {
3413 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3414 return DISAS_NEXT;
3415 }
3416
3417 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3418 {
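/* mulu2 produces the full 128-bit product: high half in out, low half
   in out2, matching the R1:R1+1 pair MLGR writes back. */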
3419 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3420 return DISAS_NEXT;
3421 }
3422
3423 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3424 {
3425 tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3426 return DISAS_NEXT;
3427 }
3428
3429 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3430 {
3431 gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
3432 return DISAS_NEXT;
3433 }
3434
3435 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3436 {
3437 gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
3438 return DISAS_NEXT;
3439 }
3440
3441 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3442 {
3443 gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
3444 return DISAS_NEXT;
3445 }
3446
3447 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3448 {
3449 gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
3450 return DISAS_NEXT;
3451 }
3452
3453 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3454 {
3455 gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
3456 return DISAS_NEXT;
3457 }
3458
3459 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3460 {
3461 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3462 gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
3463 return DISAS_NEXT;
3464 }
3465
3466 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3467 {
3468 TCGv_i64 r3 = load_freg(get_field(s, r3));
3469 gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
3470 return DISAS_NEXT;
3471 }
3472
3473 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3474 {
3475 TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3476 gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
3477 return DISAS_NEXT;
3478 }
3479
3480 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3481 {
3482 TCGv_i64 r3 = load_freg(get_field(s, r3));
3483 gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
3484 return DISAS_NEXT;
3485 }
3486
3487 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3488 {
3489 TCGv_i64 z = tcg_constant_i64(0);
3490 TCGv_i64 n = tcg_temp_new_i64();
3491
3492 tcg_gen_neg_i64(n, o->in2);
3493 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3494 return DISAS_NEXT;
3495 }
3496
3497 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3498 {
3499 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3500 return DISAS_NEXT;
3501 }
3502
3503 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3504 {
3505 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3506 return DISAS_NEXT;
3507 }
3508
3509 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3510 {
3511 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3512 tcg_gen_mov_i64(o->out2, o->in2);
3513 return DISAS_NEXT;
3514 }
3515
3516 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3517 {
3518 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3519
3520 gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
3521 set_cc_static(s);
3522 return DISAS_NEXT;
3523 }
3524
3525 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3526 {
3527 tcg_gen_neg_i64(o->out, o->in2);
3528 return DISAS_NEXT;
3529 }
3530
3531 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3532 {
3533 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3534 return DISAS_NEXT;
3535 }
3536
3537 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3538 {
3539 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3540 return DISAS_NEXT;
3541 }
3542
3543 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3544 {
3545 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3546 tcg_gen_mov_i64(o->out2, o->in2);
3547 return DISAS_NEXT;
3548 }
3549
3550 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3551 {
3552 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3553
3554 gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
3555 set_cc_static(s);
3556 return DISAS_NEXT;
3557 }
3558
3559 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3560 {
3561 tcg_gen_or_i64(o->out, o->in1, o->in2);
3562 return DISAS_NEXT;
3563 }
3564
3565 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3566 {
3567 int shift = s->insn->data & 0xff;
3568 int size = s->insn->data >> 8;
3569 uint64_t mask = ((1ull << size) - 1) << shift;
3570 TCGv_i64 t = tcg_temp_new_i64();
3571
3572 tcg_gen_shli_i64(t, o->in2, shift);
3573 tcg_gen_or_i64(o->out, o->in1, t);
3574
3575 /* Produce the CC from only the bits manipulated. */
3576 tcg_gen_andi_i64(cc_dst, o->out, mask);
3577 set_cc_nz_u64(s, cc_dst);
3578 return DISAS_NEXT;
3579 }
3580
3581 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3582 {
3583 o->in1 = tcg_temp_new_i64();
3584
3585 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3586 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3587 } else {
3588 /* Perform the atomic operation in memory. */
3589 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3590 s->insn->data);
3591 }
3592
3593 /* Recompute also for atomic case: needed for setting CC. */
3594 tcg_gen_or_i64(o->out, o->in1, o->in2);
3595
3596 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3597 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3598 }
3599 return DISAS_NEXT;
3600 }
3601
3602 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3603 {
3604 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3605
3606 gen_helper_pack(tcg_env, l, o->addr1, o->in2);
3607 return DISAS_NEXT;
3608 }
3609
3610 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3611 {
3612 int l2 = get_field(s, l2) + 1;
3613 TCGv_i32 l;
3614
3615 /* The length must not exceed 32 bytes. */
3616 if (l2 > 32) {
3617 gen_program_exception(s, PGM_SPECIFICATION);
3618 return DISAS_NORETURN;
3619 }
3620 l = tcg_constant_i32(l2);
3621 gen_helper_pka(tcg_env, o->addr1, o->in2, l);
3622 return DISAS_NEXT;
3623 }
3624
3625 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3626 {
3627 int l2 = get_field(s, l2) + 1;
3628 TCGv_i32 l;
3629
3630 /* The length must be even and must not exceed 64 bytes. */
3631 if ((l2 & 1) || (l2 > 64)) {
3632 gen_program_exception(s, PGM_SPECIFICATION);
3633 return DISAS_NORETURN;
3634 }
3635 l = tcg_constant_i32(l2);
3636 gen_helper_pku(tcg_env, o->addr1, o->in2, l);
3637 return DISAS_NEXT;
3638 }
3639
3640 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3641 {
3642 const uint8_t m3 = get_field(s, m3);
3643
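/* With m3 bit 0x8 and the MIE3 facility, count set bits across the
   whole doubleword; otherwise the helper counts per byte. */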
3644 if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3645 tcg_gen_ctpop_i64(o->out, o->in2);
3646 } else {
3647 gen_helper_popcnt(o->out, o->in2);
3648 }
3649 return DISAS_NEXT;
3650 }
3651
3652 #ifndef CONFIG_USER_ONLY
3653 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3654 {
3655 gen_helper_ptlb(tcg_env);
3656 return DISAS_NEXT;
3657 }
3658 #endif
3659
3660 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3661 {
3662 int i3 = get_field(s, i3);
3663 int i4 = get_field(s, i4);
3664 int i5 = get_field(s, i5);
3665 int do_zero = i4 & 0x80;
3666 uint64_t mask, imask, pmask;
3667 int pos, len, rot;
3668
3669 /* Adjust the arguments for the specific insn. */
3670 switch (s->fields.op2) {
3671 case 0x55: /* risbg */
3672 case 0x59: /* risbgn */
3673 i3 &= 63;
3674 i4 &= 63;
3675 pmask = ~0;
3676 break;
3677 case 0x5d: /* risbhg */
3678 i3 &= 31;
3679 i4 &= 31;
3680 pmask = 0xffffffff00000000ull;
3681 break;
3682 case 0x51: /* risblg */
3683 i3 = (i3 & 31) + 32;
3684 i4 = (i4 & 31) + 32;
3685 pmask = 0x00000000ffffffffull;
3686 break;
3687 default:
3688 g_assert_not_reached();
3689 }
3690
3691 /* MASK is the set of bits to be inserted from R2. */
3692 if (i3 <= i4) {
3693 /* [0...i3---i4...63] */
3694 mask = (-1ull >> i3) & (-1ull << (63 - i4));
3695 } else {
3696 /* [0---i4...i3---63] */
3697 mask = (-1ull >> i3) | (-1ull << (63 - i4));
3698 }
3699 /* For RISBLG/RISBHG, the wrapping is limited to the high/low word. */
3700 mask &= pmask;
3701
3702 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3703 insns, we need to keep the other half of the register. */
3704 imask = ~mask | ~pmask;
3705 if (do_zero) {
3706 imask = ~pmask;
3707 }
3708
3709 len = i4 - i3 + 1;
3710 pos = 63 - i4;
3711 rot = i5 & 63;
3712
3713 /* In some cases we can implement this with extract. */
3714 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3715 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3716 return DISAS_NEXT;
3717 }
3718
3719 /* In some cases we can implement this with deposit. */
3720 if (len > 0 && (imask == 0 || ~mask == imask)) {
3721 /* Note that we rotate the bits to be inserted to the lsb, not to
3722 the position as described in the PoO. */
3723 rot = (rot - pos) & 63;
3724 } else {
3725 pos = -1;
3726 }
3727
3728 /* Rotate the input as necessary. */
3729 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3730
3731 /* Insert the selected bits into the output. */
3732 if (pos >= 0) {
3733 if (imask == 0) {
3734 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3735 } else {
3736 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3737 }
3738 } else if (imask == 0) {
3739 tcg_gen_andi_i64(o->out, o->in2, mask);
3740 } else {
3741 tcg_gen_andi_i64(o->in2, o->in2, mask);
3742 tcg_gen_andi_i64(o->out, o->out, imask);
3743 tcg_gen_or_i64(o->out, o->out, o->in2);
3744 }
3745 return DISAS_NEXT;
3746 }
3747
3748 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3749 {
3750 int i3 = get_field(s, i3);
3751 int i4 = get_field(s, i4);
3752 int i5 = get_field(s, i5);
3753 TCGv_i64 orig_out;
3754 uint64_t mask;
3755
3756 /* If this is a test-only form, arrange to discard the result. */
3757 if (i3 & 0x80) {
3758 tcg_debug_assert(o->out != NULL);
3759 orig_out = o->out;
3760 o->out = tcg_temp_new_i64();
3761 tcg_gen_mov_i64(o->out, orig_out);
3762 }
3763
3764 i3 &= 63;
3765 i4 &= 63;
3766 i5 &= 63;
3767
3768 /* MASK is the set of bits to be operated on from R2.
3769 Take care for I3/I4 wraparound. */
3770 mask = ~0ull >> i3;
3771 if (i3 <= i4) {
3772 mask ^= ~0ull >> i4 >> 1;
3773 } else {
3774 mask |= ~(~0ull >> i4 >> 1);
3775 }
3776
3777 /* Rotate the input as necessary. */
3778 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3779
3780 /* Operate. */
3781 switch (s->fields.op2) {
3782 case 0x54: /* AND */
3783 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3784 tcg_gen_and_i64(o->out, o->out, o->in2);
3785 break;
3786 case 0x56: /* OR */
3787 tcg_gen_andi_i64(o->in2, o->in2, mask);
3788 tcg_gen_or_i64(o->out, o->out, o->in2);
3789 break;
3790 case 0x57: /* XOR */
3791 tcg_gen_andi_i64(o->in2, o->in2, mask);
3792 tcg_gen_xor_i64(o->out, o->out, o->in2);
3793 break;
3794 default:
3795 g_assert_not_reached();
3796 }
3797
3798 /* Set the CC. */
3799 tcg_gen_andi_i64(cc_dst, o->out, mask);
3800 set_cc_nz_u64(s, cc_dst);
3801 return DISAS_NEXT;
3802 }
3803
3804 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3805 {
3806 tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3807 return DISAS_NEXT;
3808 }
3809
3810 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3811 {
3812 tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3813 return DISAS_NEXT;
3814 }
3815
3816 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3817 {
3818 tcg_gen_bswap64_i64(o->out, o->in2);
3819 return DISAS_NEXT;
3820 }
3821
3822 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3823 {
3824 TCGv_i32 t1 = tcg_temp_new_i32();
3825 TCGv_i32 t2 = tcg_temp_new_i32();
3826 TCGv_i32 to = tcg_temp_new_i32();
3827 tcg_gen_extrl_i64_i32(t1, o->in1);
3828 tcg_gen_extrl_i64_i32(t2, o->in2);
3829 tcg_gen_rotl_i32(to, t1, t2);
3830 tcg_gen_extu_i32_i64(o->out, to);
3831 return DISAS_NEXT;
3832 }
3833
3834 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3835 {
3836 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3837 return DISAS_NEXT;
3838 }
3839
3840 #ifndef CONFIG_USER_ONLY
3841 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3842 {
3843 gen_helper_rrbe(cc_op, tcg_env, o->in2);
3844 set_cc_static(s);
3845 return DISAS_NEXT;
3846 }
3847
3848 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3849 {
3850 gen_helper_sacf(tcg_env, o->in2);
3851 /* Addressing mode has changed, so end the block. */
3852 return DISAS_TOO_MANY;
3853 }
3854 #endif
3855
3856 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3857 {
3858 int sam = s->insn->data;
3859 TCGv_i64 tsam;
3860 uint64_t mask;
3861
3862 switch (sam) {
3863 case 0:
3864 mask = 0xffffff;
3865 break;
3866 case 1:
3867 mask = 0x7fffffff;
3868 break;
3869 default:
3870 mask = -1;
3871 break;
3872 }
3873
3874 /* Bizarre but true, we check the address of the current insn for the
3875 specification exception, not the next to be executed. Thus the PoO
3876 documents that Bad Things Happen two bytes before the end. */
3877 if (s->base.pc_next & ~mask) {
3878 gen_program_exception(s, PGM_SPECIFICATION);
3879 return DISAS_NORETURN;
3880 }
3881 s->pc_tmp &= mask;
3882
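/* PSW bits 31-32 hold the extended/basic addressing-mode bits. */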
3883 tsam = tcg_constant_i64(sam);
3884 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3885
3886 /* Always exit the TB, since we (may have) changed execution mode. */
3887 return DISAS_TOO_MANY;
3888 }
3889
3890 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3891 {
3892 int r1 = get_field(s, r1);
3893 tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
3894 return DISAS_NEXT;
3895 }
3896
3897 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3898 {
3899 gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
3900 return DISAS_NEXT;
3901 }
3902
3903 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3904 {
3905 gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
3906 return DISAS_NEXT;
3907 }
3908
3909 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3910 {
3911 gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
3912 return DISAS_NEXT;
3913 }
3914
3915 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3916 {
3917 gen_helper_sqeb(o->out, tcg_env, o->in2);
3918 return DISAS_NEXT;
3919 }
3920
3921 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3922 {
3923 gen_helper_sqdb(o->out, tcg_env, o->in2);
3924 return DISAS_NEXT;
3925 }
3926
3927 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3928 {
3929 gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
3930 return DISAS_NEXT;
3931 }
3932
3933 #ifndef CONFIG_USER_ONLY
3934 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3935 {
3936 gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
3937 set_cc_static(s);
3938 return DISAS_NEXT;
3939 }
3940
3941 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3942 {
3943 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3944 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3945
3946 gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
3947 set_cc_static(s);
3948 return DISAS_NEXT;
3949 }
3950 #endif
3951
3952 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3953 {
3954 DisasCompare c;
3955 TCGv_i64 a, h;
3956 TCGLabel *lab;
3957 int r1;
3958
3959 disas_jcc(s, &c, get_field(s, m3));
3960
3961 /* We want to store when the condition is fulfilled, so branch
3962 out when it is not. */
3963 c.cond = tcg_invert_cond(c.cond);
3964
3965 lab = gen_new_label();
3966 if (c.is_64) {
3967 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3968 } else {
3969 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3970 }
3971
3972 r1 = get_field(s, r1);
3973 a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3974 switch (s->insn->data) {
3975 case 1: /* STOCG */
3976 tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
3977 break;
3978 case 0: /* STOC */
3979 tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
3980 break;
3981 case 2: /* STOCFH */
3982 h = tcg_temp_new_i64();
3983 tcg_gen_shri_i64(h, regs[r1], 32);
3984 tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
3985 break;
3986 default:
3987 g_assert_not_reached();
3988 }
3989
3990 gen_set_label(lab);
3991 return DISAS_NEXT;
3992 }
3993
3994 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3995 {
3996 TCGv_i64 t;
3997 uint64_t sign = 1ull << s->insn->data;
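/* For the 32-bit form, position the operand in the high half so the
   same 64-bit CC_OP_SLA overflow check applies. */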
3998 if (s->insn->data == 31) {
3999 t = tcg_temp_new_i64();
4000 tcg_gen_shli_i64(t, o->in1, 32);
4001 } else {
4002 t = o->in1;
4003 }
4004 gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
4005 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4006 /* The arithmetic left shift is curious in that it does not affect
4007 the sign bit. Copy that over from the source unchanged. */
4008 tcg_gen_andi_i64(o->out, o->out, ~sign);
4009 tcg_gen_andi_i64(o->in1, o->in1, sign);
4010 tcg_gen_or_i64(o->out, o->out, o->in1);
4011 return DISAS_NEXT;
4012 }
4013
4014 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4015 {
4016 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4017 return DISAS_NEXT;
4018 }
4019
4020 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4021 {
4022 tcg_gen_sar_i64(o->out, o->in1, o->in2);
4023 return DISAS_NEXT;
4024 }
4025
4026 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4027 {
4028 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4029 return DISAS_NEXT;
4030 }
4031
4032 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4033 {
4034 gen_helper_sfpc(tcg_env, o->in2);
4035 return DISAS_NEXT;
4036 }
4037
4038 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4039 {
4040 gen_helper_sfas(tcg_env, o->in2);
4041 return DISAS_NEXT;
4042 }
4043
4044 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4045 {
4046 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4047 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4048 gen_helper_srnm(tcg_env, o->addr1);
4049 return DISAS_NEXT;
4050 }
4051
4052 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4053 {
4054 /* Bits 0-55 are ignored. */
4055 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4056 gen_helper_srnm(tcg_env, o->addr1);
4057 return DISAS_NEXT;
4058 }
4059
4060 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4061 {
4062 TCGv_i64 tmp = tcg_temp_new_i64();
4063
4064 /* Bits other than 61-63 are ignored. */
4065 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4066
4067 /* No need to call a helper; we don't implement DFP. */
4068 tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
4069 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4070 tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
4071 return DISAS_NEXT;
4072 }
4073
4074 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4075 {
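/* SET PROGRAM MASK: bits 2-3 of the low word of r1 become the CC
   and bits 4-7 become the PSW program mask (big-endian bit
   numbering, matching the extract/deposit positions below). */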
4076 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4077 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4078 set_cc_static(s);
4079
4080 tcg_gen_shri_i64(o->in1, o->in1, 24);
4081 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4082 return DISAS_NEXT;
4083 }
4084
4085 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4086 {
4087 int b1 = get_field(s, b1);
4088 int d1 = get_field(s, d1);
4089 int b2 = get_field(s, b2);
4090 int d2 = get_field(s, d2);
4091 int r3 = get_field(s, r3);
4092 TCGv_i64 tmp = tcg_temp_new_i64();
4093
4094 /* fetch all operands first */
4095 o->in1 = tcg_temp_new_i64();
4096 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4097 o->in2 = tcg_temp_new_i64();
4098 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4099 o->addr1 = tcg_temp_new_i64();
4100 gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4101
4102 /* load the third operand into r3 before modifying anything */
4103 tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
4104
4105 /* subtract CPU timer from first operand and store in GR0 */
4106 gen_helper_stpt(tmp, tcg_env);
4107 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4108
4109 /* store second operand in GR1 */
4110 tcg_gen_mov_i64(regs[1], o->in2);
4111 return DISAS_NEXT;
4112 }
4113
4114 #ifndef CONFIG_USER_ONLY
4115 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4116 {
4117 tcg_gen_shri_i64(o->in2, o->in2, 4);
4118 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4119 return DISAS_NEXT;
4120 }
4121
4122 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4123 {
4124 gen_helper_sske(tcg_env, o->in1, o->in2);
4125 return DISAS_NEXT;
4126 }
4127
4128 static void gen_check_psw_mask(DisasContext *s)
4129 {
4130 TCGv_i64 reserved = tcg_temp_new_i64();
4131 TCGLabel *ok = gen_new_label();
4132
4133 tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
4134 tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
4135 gen_program_exception(s, PGM_SPECIFICATION);
4136 gen_set_label(ok);
4137 }
4138
4139 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4140 {
4141 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4142
4143 gen_check_psw_mask(s);
4144
4145 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4146 s->exit_to_mainloop = true;
4147 return DISAS_TOO_MANY;
4148 }
4149
4150 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4151 {
4152 tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
4153 return DISAS_NEXT;
4154 }
4155 #endif
4156
4157 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4158 {
4159 gen_helper_stck(o->out, tcg_env);
4160 /* ??? We don't implement clock states. */
4161 gen_op_movi_cc(s, 0);
4162 return DISAS_NEXT;
4163 }
4164
4165 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4166 {
4167 TCGv_i64 c1 = tcg_temp_new_i64();
4168 TCGv_i64 c2 = tcg_temp_new_i64();
4169 TCGv_i64 todpr = tcg_temp_new_i64();
4170 gen_helper_stck(c1, tcg_env);
4171 /* 16-bit value stored in a uint32_t (only valid bits set) */
4172 tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
4173 /* Shift the 64-bit value into its place as a zero-extended
4174 104-bit value. Note that "bit positions 64-103 are always
4175 non-zero so that they compare differently to STCK"; we set
4176 the least significant bit to 1. */
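/*
 * Resulting 16-byte layout, as derived from the stores below:
 * byte 0 is the zero epoch index, bytes 1-8 hold the 64-bit TOD
 * clock, bytes 9-13 are zero except that OR-ing in 0x10000 below
 * makes byte 13 equal 0x01, and bytes 14-15 hold the programmable
 * field.
 */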
4177 tcg_gen_shli_i64(c2, c1, 56);
4178 tcg_gen_shri_i64(c1, c1, 8);
4179 tcg_gen_ori_i64(c2, c2, 0x10000);
4180 tcg_gen_or_i64(c2, c2, todpr);
4181 tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
4182 tcg_gen_addi_i64(o->in2, o->in2, 8);
4183 tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
4184 /* ??? We don't implement clock states. */
4185 gen_op_movi_cc(s, 0);
4186 return DISAS_NEXT;
4187 }
4188
4189 #ifndef CONFIG_USER_ONLY
4190 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4191 {
4192 gen_helper_sck(cc_op, tcg_env, o->in2);
4193 set_cc_static(s);
4194 return DISAS_NEXT;
4195 }
4196
4197 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4198 {
4199 gen_helper_sckc(tcg_env, o->in2);
4200 return DISAS_NEXT;
4201 }
4202
4203 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4204 {
4205 gen_helper_sckpf(tcg_env, regs[0]);
4206 return DISAS_NEXT;
4207 }
4208
4209 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4210 {
4211 gen_helper_stckc(o->out, tcg_env);
4212 return DISAS_NEXT;
4213 }
4214
4215 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4216 {
4217 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4218 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4219
4220 gen_helper_stctg(tcg_env, r1, o->in2, r3);
4221 return DISAS_NEXT;
4222 }
4223
4224 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4225 {
4226 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4227 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4228
4229 gen_helper_stctl(tcg_env, r1, o->in2, r3);
4230 return DISAS_NEXT;
4231 }
4232
4233 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4234 {
4235 tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
4236 return DISAS_NEXT;
4237 }
4238
4239 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4240 {
4241 gen_helper_spt(tcg_env, o->in2);
4242 return DISAS_NEXT;
4243 }
4244
4245 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4246 {
4247 gen_helper_stfl(tcg_env);
4248 return DISAS_NEXT;
4249 }
4250
4251 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4252 {
4253 gen_helper_stpt(o->out, tcg_env);
4254 return DISAS_NEXT;
4255 }
4256
4257 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4258 {
4259 gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
4260 set_cc_static(s);
4261 return DISAS_NEXT;
4262 }
4263
4264 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4265 {
4266 gen_helper_spx(tcg_env, o->in2);
4267 return DISAS_NEXT;
4268 }
4269
4270 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4271 {
4272 gen_helper_xsch(tcg_env, regs[1]);
4273 set_cc_static(s);
4274 return DISAS_NEXT;
4275 }
4276
4277 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4278 {
4279 gen_helper_csch(tcg_env, regs[1]);
4280 set_cc_static(s);
4281 return DISAS_NEXT;
4282 }
4283
4284 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4285 {
4286 gen_helper_hsch(tcg_env, regs[1]);
4287 set_cc_static(s);
4288 return DISAS_NEXT;
4289 }
4290
4291 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4292 {
4293 gen_helper_msch(tcg_env, regs[1], o->in2);
4294 set_cc_static(s);
4295 return DISAS_NEXT;
4296 }
4297
4298 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4299 {
4300 gen_helper_rchp(tcg_env, regs[1]);
4301 set_cc_static(s);
4302 return DISAS_NEXT;
4303 }
4304
4305 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4306 {
4307 gen_helper_rsch(tcg_env, regs[1]);
4308 set_cc_static(s);
4309 return DISAS_NEXT;
4310 }
4311
4312 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4313 {
4314 gen_helper_sal(tcg_env, regs[1]);
4315 return DISAS_NEXT;
4316 }
4317
4318 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4319 {
4320 gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
4321 return DISAS_NEXT;
4322 }
4323
4324 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4325 {
4326 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4327 gen_op_movi_cc(s, 3);
4328 return DISAS_NEXT;
4329 }
4330
4331 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4332 {
4333 /* The instruction is suppressed if not provided. */
4334 return DISAS_NEXT;
4335 }
4336
4337 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4338 {
4339 gen_helper_ssch(tcg_env, regs[1], o->in2);
4340 set_cc_static(s);
4341 return DISAS_NEXT;
4342 }
4343
4344 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4345 {
4346 gen_helper_stsch(tcg_env, regs[1], o->in2);
4347 set_cc_static(s);
4348 return DISAS_NEXT;
4349 }
4350
4351 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4352 {
4353 gen_helper_stcrw(tcg_env, o->in2);
4354 set_cc_static(s);
4355 return DISAS_NEXT;
4356 }
4357
4358 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4359 {
4360 gen_helper_tpi(cc_op, tcg_env, o->addr1);
4361 set_cc_static(s);
4362 return DISAS_NEXT;
4363 }
4364
4365 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4366 {
4367 gen_helper_tsch(tcg_env, regs[1], o->in2);
4368 set_cc_static(s);
4369 return DISAS_NEXT;
4370 }
4371
4372 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4373 {
4374 gen_helper_chsc(tcg_env, o->in2);
4375 set_cc_static(s);
4376 return DISAS_NEXT;
4377 }
4378
4379 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4380 {
4381 tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
4382 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4383 return DISAS_NEXT;
4384 }
4385
4386 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4387 {
4388 uint64_t i2 = get_field(s, i2);
4389 TCGv_i64 t;
4390
4391 /* It is important to do what the instruction name says: STORE THEN.
4392 If we let the output hook perform the store, then on a fault and
4393 restart we would have the wrong SYSTEM MASK in place. */
4394 t = tcg_temp_new_i64();
4395 tcg_gen_shri_i64(t, psw_mask, 56);
4396 tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);
4397
4398 if (s->fields.op == 0xac) {
4399 tcg_gen_andi_i64(psw_mask, psw_mask,
4400 (i2 << 56) | 0x00ffffffffffffffull);
4401 } else {
4402 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4403 }
4404
4405 gen_check_psw_mask(s);
4406
4407 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4408 s->exit_to_mainloop = true;
4409 return DISAS_TOO_MANY;
4410 }
4411
4412 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4413 {
4414 tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4415
4416 if (s->base.tb->flags & FLAG_MASK_PER) {
4417 update_psw_addr(s);
4418 gen_helper_per_store_real(tcg_env);
4419 }
4420 return DISAS_NEXT;
4421 }
4422 #endif
4423
4424 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4425 {
4426 gen_helper_stfle(cc_op, tcg_env, o->in2);
4427 set_cc_static(s);
4428 return DISAS_NEXT;
4429 }
4430
4431 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4432 {
4433 tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
4434 return DISAS_NEXT;
4435 }
4436
4437 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4438 {
4439 tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
4440 return DISAS_NEXT;
4441 }
4442
4443 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4444 {
4445 tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
4446 MO_TEUL | s->insn->data);
4447 return DISAS_NEXT;
4448 }
4449
4450 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4451 {
4452 tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
4453 MO_TEUQ | s->insn->data);
4454 return DISAS_NEXT;
4455 }
4456
4457 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4458 {
4459 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4460 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4461
4462 gen_helper_stam(tcg_env, r1, o->in2, r3);
4463 return DISAS_NEXT;
4464 }
4465
4466 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4467 {
4468 int m3 = get_field(s, m3);
4469 int pos, base = s->insn->data;
4470 TCGv_i64 tmp = tcg_temp_new_i64();
4471
4472 pos = base + ctz32(m3) * 8;
4473 switch (m3) {
4474 case 0xf:
4475 /* Effectively a 32-bit store. */
4476 tcg_gen_shri_i64(tmp, o->in1, pos);
4477 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
4478 break;
4479
4480 case 0xc:
4481 case 0x6:
4482 case 0x3:
4483 /* Effectively a 16-bit store. */
4484 tcg_gen_shri_i64(tmp, o->in1, pos);
4485 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
4486 break;
4487
4488 case 0x8:
4489 case 0x4:
4490 case 0x2:
4491 case 0x1:
4492 /* Effectively an 8-bit store. */
4493 tcg_gen_shri_i64(tmp, o->in1, pos);
4494 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4495 break;
4496
4497 default:
4498 /* This is going to be a sequence of shifts and stores. */
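/* Illustration: m3 = 0xa stores the first and third bytes of the
   field, selected left to right by the mask bits, at consecutive
   byte addresses. */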
4499 pos = base + 32 - 8;
4500 while (m3) {
4501 if (m3 & 0x8) {
4502 tcg_gen_shri_i64(tmp, o->in1, pos);
4503 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4504 tcg_gen_addi_i64(o->in2, o->in2, 1);
4505 }
4506 m3 = (m3 << 1) & 0xf;
4507 pos -= 8;
4508 }
4509 break;
4510 }
4511 return DISAS_NEXT;
4512 }
4513
4514 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4515 {
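/* STORE MULTIPLE: store registers r1 through r3, wrapping from 15
   back to 0, at successive 4- or 8-byte locations per insn->data. */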
4516 int r1 = get_field(s, r1);
4517 int r3 = get_field(s, r3);
4518 int size = s->insn->data;
4519 TCGv_i64 tsize = tcg_constant_i64(size);
4520
4521 while (1) {
4522 tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
4523 size == 8 ? MO_TEUQ : MO_TEUL);
4524 if (r1 == r3) {
4525 break;
4526 }
4527 tcg_gen_add_i64(o->in2, o->in2, tsize);
4528 r1 = (r1 + 1) & 15;
4529 }
4530
4531 return DISAS_NEXT;
4532 }
4533
4534 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4535 {
4536 int r1 = get_field(s, r1);
4537 int r3 = get_field(s, r3);
4538 TCGv_i64 t = tcg_temp_new_i64();
4539 TCGv_i64 t4 = tcg_constant_i64(4);
4540 TCGv_i64 t32 = tcg_constant_i64(32);
4541
4542 while (1) {
4543 tcg_gen_shl_i64(t, regs[r1], t32);
4544 tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
4545 if (r1 == r3) {
4546 break;
4547 }
4548 tcg_gen_add_i64(o->in2, o->in2, t4);
4549 r1 = (r1 + 1) & 15;
4550 }
4551 return DISAS_NEXT;
4552 }
4553
4554 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4555 {
4556 TCGv_i128 t16 = tcg_temp_new_i128();
4557
4558 tcg_gen_concat_i64_i128(t16, o->out2, o->out);
4559 tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
4560 MO_TE | MO_128 | MO_ALIGN);
4561 return DISAS_NEXT;
4562 }
4563
4564 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4565 {
4566 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4567 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4568
4569 gen_helper_srst(tcg_env, r1, r2);
4570 set_cc_static(s);
4571 return DISAS_NEXT;
4572 }
4573
4574 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4575 {
4576 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4577 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4578
4579 gen_helper_srstu(tcg_env, r1, r2);
4580 set_cc_static(s);
4581 return DISAS_NEXT;
4582 }
4583
4584 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4585 {
4586 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4587 return DISAS_NEXT;
4588 }
4589
4590 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4591 {
4592 tcg_gen_movi_i64(cc_src, 0);
4593 tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4594 return DISAS_NEXT;
4595 }
4596
4597 /* Compute borrow (0, -1) into cc_src. */
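/*
 * Sketch of the CC_OP_STATIC conversion below: a logical-subtraction
 * CC of 2 or 3 means "no borrow" (carry bit 1), so (cc >> 1) - 1 = 0;
 * a CC of 0 or 1 means "borrow", giving (cc >> 1) - 1 = -1.
 */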
4598 static void compute_borrow(DisasContext *s)
4599 {
4600 switch (s->cc_op) {
4601 case CC_OP_SUBU:
4602 /* The borrow value is already in cc_src (0,-1). */
4603 break;
4604 default:
4605 gen_op_calc_cc(s);
4606 /* fall through */
4607 case CC_OP_STATIC:
4608 /* The carry flag is the msb of CC; compute into cc_src. */
4609 tcg_gen_extu_i32_i64(cc_src, cc_op);
4610 tcg_gen_shri_i64(cc_src, cc_src, 1);
4611 /* fall through */
4612 case CC_OP_ADDU:
4613 /* Convert carry (1,0) to borrow (0,-1). */
4614 tcg_gen_subi_i64(cc_src, cc_src, 1);
4615 break;
4616 }
4617 }
4618
4619 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4620 {
4621 compute_borrow(s);
4622
4623 /* Borrow is {0, -1}, so add to subtract. */
4624 tcg_gen_add_i64(o->out, o->in1, cc_src);
4625 tcg_gen_sub_i64(o->out, o->out, o->in2);
4626 return DISAS_NEXT;
4627 }
4628
4629 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4630 {
4631 compute_borrow(s);
4632
4633 /*
4634 * Borrow is {0, -1}, so add to subtract; replicate the
4635 * borrow input to produce 128-bit -1 for the addition.
4636 */
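/* Sketch: cc_src is 0 or -1 in both halves here, so the add2 adds a
   128-bit 0 or -1 to the zero-extended in1, the sub2 then subtracts
   in2, and the new borrow (0 or -1) lands back in cc_src. */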
4637 TCGv_i64 zero = tcg_constant_i64(0);
4638 tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4639 tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4640
4641 return DISAS_NEXT;
4642 }
4643
4644 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4645 {
4646 TCGv_i32 t;
4647
4648 update_psw_addr(s);
4649 update_cc_op(s);
4650
4651 t = tcg_constant_i32(get_field(s, i1) & 0xff);
4652 tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));
4653
4654 t = tcg_constant_i32(s->ilen);
4655 tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));
4656
4657 gen_exception(EXCP_SVC);
4658 return DISAS_NORETURN;
4659 }
4660
4661 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4662 {
4663 int cc = 0;
4664
4665 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4666 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4667 gen_op_movi_cc(s, cc);
4668 return DISAS_NEXT;
4669 }
4670
4671 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4672 {
4673 gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
4674 set_cc_static(s);
4675 return DISAS_NEXT;
4676 }
4677
4678 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4679 {
4680 gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
4681 set_cc_static(s);
4682 return DISAS_NEXT;
4683 }
4684
4685 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4686 {
4687 gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
4688 set_cc_static(s);
4689 return DISAS_NEXT;
4690 }
4691
4692 #ifndef CONFIG_USER_ONLY
4693
4694 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4695 {
4696 gen_helper_testblock(cc_op, tcg_env, o->in2);
4697 set_cc_static(s);
4698 return DISAS_NEXT;
4699 }
4700
4701 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4702 {
4703 gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
4704 set_cc_static(s);
4705 return DISAS_NEXT;
4706 }
4707
4708 #endif
4709
4710 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4711 {
4712 TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
4713
4714 gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
4715 set_cc_static(s);
4716 return DISAS_NEXT;
4717 }
4718
4719 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4720 {
4721 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4722
4723 gen_helper_tr(tcg_env, l, o->addr1, o->in2);
4724 set_cc_static(s);
4725 return DISAS_NEXT;
4726 }
4727
4728 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4729 {
4730 TCGv_i128 pair = tcg_temp_new_i128();
4731
4732 gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
4733 tcg_gen_extr_i128_i64(o->out2, o->out, pair);
4734 set_cc_static(s);
4735 return DISAS_NEXT;
4736 }
4737
4738 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4739 {
4740 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4741
4742 gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
4743 set_cc_static(s);
4744 return DISAS_NEXT;
4745 }
4746
4747 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4748 {
4749 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4750
4751 gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
4752 set_cc_static(s);
4753 return DISAS_NEXT;
4754 }
4755
4756 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4757 {
4758 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4759 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4760 TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
4761 TCGv_i32 tst = tcg_temp_new_i32();
4762 int m3 = get_field(s, m3);
4763
4764 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4765 m3 = 0;
4766 }
4767 if (m3 & 1) {
4768 tcg_gen_movi_i32(tst, -1);
4769 } else {
4770 tcg_gen_extrl_i64_i32(tst, regs[0]);
4771 if (s->insn->opc & 3) {
4772 tcg_gen_ext8u_i32(tst, tst);
4773 } else {
4774 tcg_gen_ext16u_i32(tst, tst);
4775 }
4776 }
4777 gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);
4778
4779 set_cc_static(s);
4780 return DISAS_NEXT;
4781 }
4782
4783 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4784 {
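/* TEST AND SET: atomically exchange the byte with all ones, then
   derive the CC from the leftmost bit of the old value. */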
4785 TCGv_i32 t1 = tcg_constant_i32(0xff);
4786
4787 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4788 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4789 set_cc_static(s);
4790 return DISAS_NEXT;
4791 }
4792
4793 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4794 {
4795 TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4796
4797 gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
4798 return DISAS_NEXT;
4799 }
4800
4801 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4802 {
4803 int l1 = get_field(s, l1) + 1;
4804 TCGv_i32 l;
4805
4806 /* The length must not exceed 32 bytes. */
4807 if (l1 > 32) {
4808 gen_program_exception(s, PGM_SPECIFICATION);
4809 return DISAS_NORETURN;
4810 }
4811 l = tcg_constant_i32(l1);
4812 gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
4813 set_cc_static(s);
4814 return DISAS_NEXT;
4815 }
4816
4817 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4818 {
4819 int l1 = get_field(s, l1) + 1;
4820 TCGv_i32 l;
4821
4822 /* The length must be even and must not exceed 64 bytes. */
4823 if ((l1 & 1) || (l1 > 64)) {
4824 gen_program_exception(s, PGM_SPECIFICATION);
4825 return DISAS_NORETURN;
4826 }
4827 l = tcg_constant_i32(l1);
4828 gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
4829 set_cc_static(s);
4830 return DISAS_NEXT;
4831 }
4832
4833
4834 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4835 {
4836 int d1 = get_field(s, d1);
4837 int d2 = get_field(s, d2);
4838 int b1 = get_field(s, b1);
4839 int b2 = get_field(s, b2);
4840 int l = get_field(s, l1);
4841 TCGv_i32 t32;
4842
4843 o->addr1 = get_address(s, 0, b1, d1);
4844
4845 /* If the addresses are identical, this is a store/memset of zero. */
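/* XC of a field with itself is the classic way of clearing storage
   on s390, hence this fast path: the length (at most 32 bytes here)
   is decomposed into 8-, 4-, 2- and 1-byte zero stores below. */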
4846 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4847 o->in2 = tcg_constant_i64(0);
4848
4849 l++;
4850 while (l >= 8) {
4851 tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
4852 l -= 8;
4853 if (l > 0) {
4854 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4855 }
4856 }
4857 if (l >= 4) {
4858 tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
4859 l -= 4;
4860 if (l > 0) {
4861 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4862 }
4863 }
4864 if (l >= 2) {
4865 tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
4866 l -= 2;
4867 if (l > 0) {
4868 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4869 }
4870 }
4871 if (l) {
4872 tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
4873 }
4874 gen_op_movi_cc(s, 0);
4875 return DISAS_NEXT;
4876 }
4877
4878 /* But in general we'll defer to a helper. */
4879 o->in2 = get_address(s, 0, b2, d2);
4880 t32 = tcg_constant_i32(l);
4881 gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
4882 set_cc_static(s);
4883 return DISAS_NEXT;
4884 }
4885
4886 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4887 {
4888 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4889 return DISAS_NEXT;
4890 }
4891
4892 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4893 {
4894 int shift = s->insn->data & 0xff;
4895 int size = s->insn->data >> 8;
4896 uint64_t mask = ((1ull << size) - 1) << shift;
4897 TCGv_i64 t = tcg_temp_new_i64();
4898
4899 tcg_gen_shli_i64(t, o->in2, shift);
4900 tcg_gen_xor_i64(o->out, o->in1, t);
4901
4902 /* Produce the CC from only the bits manipulated. */
4903 tcg_gen_andi_i64(cc_dst, o->out, mask);
4904 set_cc_nz_u64(s, cc_dst);
4905 return DISAS_NEXT;
4906 }
4907
4908 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4909 {
4910 o->in1 = tcg_temp_new_i64();
4911
4912 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4913 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4914 } else {
4915 /* Perform the atomic operation in memory. */
4916 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4917 s->insn->data);
4918 }
4919
4920 /* Recompute also for atomic case: needed for setting CC. */
4921 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4922
4923 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4924 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4925 }
4926 return DISAS_NEXT;
4927 }
4928
4929 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4930 {
4931 o->out = tcg_constant_i64(0);
4932 return DISAS_NEXT;
4933 }
4934
4935 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4936 {
4937 o->out = tcg_constant_i64(0);
4938 o->out2 = o->out;
4939 return DISAS_NEXT;
4940 }
4941
4942 #ifndef CONFIG_USER_ONLY
4943 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4944 {
4945 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4946
4947 gen_helper_clp(tcg_env, r2);
4948 set_cc_static(s);
4949 return DISAS_NEXT;
4950 }
4951
4952 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4953 {
4954 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4955 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4956
4957 gen_helper_pcilg(tcg_env, r1, r2);
4958 set_cc_static(s);
4959 return DISAS_NEXT;
4960 }
4961
4962 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4963 {
4964 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4965 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4966
4967 gen_helper_pcistg(tcg_env, r1, r2);
4968 set_cc_static(s);
4969 return DISAS_NEXT;
4970 }
4971
4972 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4973 {
4974 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4975 TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4976
4977 gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
4978 set_cc_static(s);
4979 return DISAS_NEXT;
4980 }
4981
4982 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4983 {
4984 gen_helper_sic(tcg_env, o->in1, o->in2);
4985 return DISAS_NEXT;
4986 }
4987
4988 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4989 {
4990 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4991 TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4992
4993 gen_helper_rpcit(tcg_env, r1, r2);
4994 set_cc_static(s);
4995 return DISAS_NEXT;
4996 }
4997
4998 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4999 {
5000 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
5001 TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
5002 TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
5003
5004 gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
5005 set_cc_static(s);
5006 return DISAS_NEXT;
5007 }
5008
5009 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5010 {
5011 TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
5012 TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
5013
5014 gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
5015 set_cc_static(s);
5016 return DISAS_NEXT;
5017 }
5018 #endif
5019
5020 #include "translate_vx.c.inc"
5021
5022 /* ====================================================================== */
5023 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5024 the original inputs), update the various cc data structures in order to
5025 be able to compute the new condition code. */
5026
5027 static void cout_abs32(DisasContext *s, DisasOps *o)
5028 {
5029 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5030 }
5031
5032 static void cout_abs64(DisasContext *s, DisasOps *o)
5033 {
5034 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5035 }
5036
5037 static void cout_adds32(DisasContext *s, DisasOps *o)
5038 {
5039 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5040 }
5041
5042 static void cout_adds64(DisasContext *s, DisasOps *o)
5043 {
5044 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5045 }
5046
5047 static void cout_addu32(DisasContext *s, DisasOps *o)
5048 {
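/* The 32-bit inputs were zero-extended, so a carry out of bit 31
   ends up in bit 32 of the 64-bit sum; split it out into cc_src. */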
5049 tcg_gen_shri_i64(cc_src, o->out, 32);
5050 tcg_gen_ext32u_i64(cc_dst, o->out);
5051 gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5052 }
5053
5054 static void cout_addu64(DisasContext *s, DisasOps *o)
5055 {
5056 gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5057 }
5058
5059 static void cout_cmps32(DisasContext *s, DisasOps *o)
5060 {
5061 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5062 }
5063
5064 static void cout_cmps64(DisasContext *s, DisasOps *o)
5065 {
5066 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5067 }
5068
5069 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5070 {
5071 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5072 }
5073
5074 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5075 {
5076 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5077 }
5078
5079 static void cout_f32(DisasContext *s, DisasOps *o)
5080 {
5081 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5082 }
5083
5084 static void cout_f64(DisasContext *s, DisasOps *o)
5085 {
5086 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5087 }
5088
5089 static void cout_f128(DisasContext *s, DisasOps *o)
5090 {
5091 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5092 }
5093
5094 static void cout_nabs32(DisasContext *s, DisasOps *o)
5095 {
5096 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5097 }
5098
5099 static void cout_nabs64(DisasContext *s, DisasOps *o)
5100 {
5101 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5102 }
5103
5104 static void cout_neg32(DisasContext *s, DisasOps *o)
5105 {
5106 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5107 }
5108
5109 static void cout_neg64(DisasContext *s, DisasOps *o)
5110 {
5111 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5112 }
5113
5114 static void cout_nz32(DisasContext *s, DisasOps *o)
5115 {
5116 tcg_gen_ext32u_i64(cc_dst, o->out);
5117 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5118 }
5119
5120 static void cout_nz64(DisasContext *s, DisasOps *o)
5121 {
5122 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5123 }
5124
5125 static void cout_s32(DisasContext *s, DisasOps *o)
5126 {
5127 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5128 }
5129
5130 static void cout_s64(DisasContext *s, DisasOps *o)
5131 {
5132 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5133 }
5134
5135 static void cout_subs32(DisasContext *s, DisasOps *o)
5136 {
5137 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5138 }
5139
5140 static void cout_subs64(DisasContext *s, DisasOps *o)
5141 {
5142 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5143 }
5144
5145 static void cout_subu32(DisasContext *s, DisasOps *o)
5146 {
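/* With zero-extended 32-bit inputs, bits 32-63 of the difference are
   all zeros (no borrow) or all ones (borrow); the arithmetic shift
   yields the 0 / -1 borrow encoding directly. */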
5147 tcg_gen_sari_i64(cc_src, o->out, 32);
5148 tcg_gen_ext32u_i64(cc_dst, o->out);
5149 gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5150 }
5151
5152 static void cout_subu64(DisasContext *s, DisasOps *o)
5153 {
5154 gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5155 }
5156
5157 static void cout_tm32(DisasContext *s, DisasOps *o)
5158 {
5159 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5160 }
5161
5162 static void cout_tm64(DisasContext *s, DisasOps *o)
5163 {
5164 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5165 }
5166
5167 static void cout_muls32(DisasContext *s, DisasOps *o)
5168 {
5169 gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5170 }
5171
5172 static void cout_muls64(DisasContext *s, DisasOps *o)
5173 {
5174 /* out contains the "high" part, out2 the "low" part of the 128-bit result */
5175 gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5176 }
5177
5178 /* ====================================================================== */
5179 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5180 with the TCG register to which we will write. Used in combination with
5181 the "wout" generators, in some cases we need a new temporary, and in
5182 some cases we can write to a TCG global. */
5183
5184 static void prep_new(DisasContext *s, DisasOps *o)
5185 {
5186 o->out = tcg_temp_new_i64();
5187 }
5188 #define SPEC_prep_new 0
5189
5190 static void prep_new_P(DisasContext *s, DisasOps *o)
5191 {
5192 o->out = tcg_temp_new_i64();
5193 o->out2 = tcg_temp_new_i64();
5194 }
5195 #define SPEC_prep_new_P 0
5196
5197 static void prep_new_x(DisasContext *s, DisasOps *o)
5198 {
5199 o->out_128 = tcg_temp_new_i128();
5200 }
5201 #define SPEC_prep_new_x 0
5202
5203 static void prep_r1(DisasContext *s, DisasOps *o)
5204 {
5205 o->out = regs[get_field(s, r1)];
5206 }
5207 #define SPEC_prep_r1 0
5208
5209 static void prep_r1_P(DisasContext *s, DisasOps *o)
5210 {
5211 int r1 = get_field(s, r1);
5212 o->out = regs[r1];
5213 o->out2 = regs[r1 + 1];
5214 }
5215 #define SPEC_prep_r1_P SPEC_r1_even
5216
5217 /* ====================================================================== */
5218 /* The "Write OUTput" generators. These generally perform some non-trivial
5219 copy of data to TCG globals, or to main memory. The trivial cases are
5220 generally handled by having a "prep" generator install the TCG global
5221 as the destination of the operation. */
5222
5223 static void wout_r1(DisasContext *s, DisasOps *o)
5224 {
5225 store_reg(get_field(s, r1), o->out);
5226 }
5227 #define SPEC_wout_r1 0
5228
5229 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5230 {
5231 store_reg(get_field(s, r1), o->out2);
5232 }
5233 #define SPEC_wout_out2_r1 0
5234
5235 static void wout_r1_8(DisasContext *s, DisasOps *o)
5236 {
5237 int r1 = get_field(s, r1);
5238 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5239 }
5240 #define SPEC_wout_r1_8 0
5241
5242 static void wout_r1_16(DisasContext *s, DisasOps *o)
5243 {
5244 int r1 = get_field(s, r1);
5245 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5246 }
5247 #define SPEC_wout_r1_16 0
5248
5249 static void wout_r1_32(DisasContext *s, DisasOps *o)
5250 {
5251 store_reg32_i64(get_field(s, r1), o->out);
5252 }
5253 #define SPEC_wout_r1_32 0
5254
5255 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5256 {
5257 store_reg32h_i64(get_field(s, r1), o->out);
5258 }
5259 #define SPEC_wout_r1_32h 0
5260
5261 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5262 {
5263 int r1 = get_field(s, r1);
5264 store_reg32_i64(r1, o->out);
5265 store_reg32_i64(r1 + 1, o->out2);
5266 }
5267 #define SPEC_wout_r1_P32 SPEC_r1_even
5268
5269 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5270 {
5271 int r1 = get_field(s, r1);
5272 TCGv_i64 t = tcg_temp_new_i64();
5273 store_reg32_i64(r1 + 1, o->out);
5274 tcg_gen_shri_i64(t, o->out, 32);
5275 store_reg32_i64(r1, t);
5276 }
5277 #define SPEC_wout_r1_D32 SPEC_r1_even
5278
5279 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5280 {
5281 int r1 = get_field(s, r1);
5282 tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5283 }
5284 #define SPEC_wout_r1_D64 SPEC_r1_even
5285
5286 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5287 {
5288 int r3 = get_field(s, r3);
5289 store_reg32_i64(r3, o->out);
5290 store_reg32_i64(r3 + 1, o->out2);
5291 }
5292 #define SPEC_wout_r3_P32 SPEC_r3_even
5293
5294 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5295 {
5296 int r3 = get_field(s, r3);
5297 store_reg(r3, o->out);
5298 store_reg(r3 + 1, o->out2);
5299 }
5300 #define SPEC_wout_r3_P64 SPEC_r3_even
5301
5302 static void wout_e1(DisasContext *s, DisasOps *o)
5303 {
5304 store_freg32_i64(get_field(s, r1), o->out);
5305 }
5306 #define SPEC_wout_e1 0
5307
5308 static void wout_f1(DisasContext *s, DisasOps *o)
5309 {
5310 store_freg(get_field(s, r1), o->out);
5311 }
5312 #define SPEC_wout_f1 0
5313
5314 static void wout_x1(DisasContext *s, DisasOps *o)
5315 {
5316 int f1 = get_field(s, r1);
5317
5318 /* Split out_128 into out+out2 for cout_f128. */
5319 tcg_debug_assert(o->out == NULL);
5320 o->out = tcg_temp_new_i64();
5321 o->out2 = tcg_temp_new_i64();
5322
5323 tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
5324 store_freg(f1, o->out);
5325 store_freg(f1 + 2, o->out2);
5326 }
5327 #define SPEC_wout_x1 SPEC_r1_f128
5328
5329 static void wout_x1_P(DisasContext *s, DisasOps *o)
5330 {
5331 int f1 = get_field(s, r1);
5332 store_freg(f1, o->out);
5333 store_freg(f1 + 2, o->out2);
5334 }
5335 #define SPEC_wout_x1_P SPEC_r1_f128
5336
5337 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5338 {
5339 if (get_field(s, r1) != get_field(s, r2)) {
5340 store_reg32_i64(get_field(s, r1), o->out);
5341 }
5342 }
5343 #define SPEC_wout_cond_r1r2_32 0
5344
5345 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5346 {
5347 if (get_field(s, r1) != get_field(s, r2)) {
5348 store_freg32_i64(get_field(s, r1), o->out);
5349 }
5350 }
5351 #define SPEC_wout_cond_e1e2 0
5352
5353 static void wout_m1_8(DisasContext *s, DisasOps *o)
5354 {
5355 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
5356 }
5357 #define SPEC_wout_m1_8 0
5358
5359 static void wout_m1_16(DisasContext *s, DisasOps *o)
5360 {
5361 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
5362 }
5363 #define SPEC_wout_m1_16 0
5364
5365 #ifndef CONFIG_USER_ONLY
5366 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5367 {
5368 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5369 }
5370 #define SPEC_wout_m1_16a 0
5371 #endif
5372
5373 static void wout_m1_32(DisasContext *s, DisasOps *o)
5374 {
5375 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
5376 }
5377 #define SPEC_wout_m1_32 0
5378
5379 #ifndef CONFIG_USER_ONLY
5380 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5381 {
5382 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5383 }
5384 #define SPEC_wout_m1_32a 0
5385 #endif
5386
5387 static void wout_m1_64(DisasContext *s, DisasOps *o)
5388 {
5389 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
5390 }
5391 #define SPEC_wout_m1_64 0
5392
5393 #ifndef CONFIG_USER_ONLY
5394 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5395 {
5396 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5397 }
5398 #define SPEC_wout_m1_64a 0
5399 #endif
5400
5401 static void wout_m2_32(DisasContext *s, DisasOps *o)
5402 {
5403 tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
5404 }
5405 #define SPEC_wout_m2_32 0
5406
5407 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5408 {
5409 store_reg(get_field(s, r1), o->in2);
5410 }
5411 #define SPEC_wout_in2_r1 0
5412
5413 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5414 {
5415 store_reg32_i64(get_field(s, r1), o->in2);
5416 }
5417 #define SPEC_wout_in2_r1_32 0
5418
5419 /* ====================================================================== */
5420 /* The "INput 1" generators. These load the first operand to an insn. */
5421
5422 static void in1_r1(DisasContext *s, DisasOps *o)
5423 {
5424 o->in1 = load_reg(get_field(s, r1));
5425 }
5426 #define SPEC_in1_r1 0
5427
5428 static void in1_r1_o(DisasContext *s, DisasOps *o)
5429 {
5430 o->in1 = regs[get_field(s, r1)];
5431 }
5432 #define SPEC_in1_r1_o 0
5433
5434 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5435 {
5436 o->in1 = tcg_temp_new_i64();
5437 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5438 }
5439 #define SPEC_in1_r1_32s 0
5440
5441 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5442 {
5443 o->in1 = tcg_temp_new_i64();
5444 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5445 }
5446 #define SPEC_in1_r1_32u 0
5447
5448 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5449 {
5450 o->in1 = tcg_temp_new_i64();
5451 tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5452 }
5453 #define SPEC_in1_r1_sr32 0
5454
5455 static void in1_r1p1(DisasContext *s, DisasOps *o)
5456 {
5457 o->in1 = load_reg(get_field(s, r1) + 1);
5458 }
5459 #define SPEC_in1_r1p1 SPEC_r1_even
5460
5461 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5462 {
5463 o->in1 = regs[get_field(s, r1) + 1];
5464 }
5465 #define SPEC_in1_r1p1_o SPEC_r1_even
5466
5467 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5468 {
5469 o->in1 = tcg_temp_new_i64();
5470 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5471 }
5472 #define SPEC_in1_r1p1_32s SPEC_r1_even
5473
5474 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5475 {
5476 o->in1 = tcg_temp_new_i64();
5477 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5478 }
5479 #define SPEC_in1_r1p1_32u SPEC_r1_even
5480
5481 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5482 {
5483 int r1 = get_field(s, r1);
5484 o->in1 = tcg_temp_new_i64();
5485 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5486 }
5487 #define SPEC_in1_r1_D32 SPEC_r1_even
5488
5489 static void in1_r2(DisasContext *s, DisasOps *o)
5490 {
5491 o->in1 = load_reg(get_field(s, r2));
5492 }
5493 #define SPEC_in1_r2 0
5494
5495 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5496 {
5497 o->in1 = tcg_temp_new_i64();
5498 tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5499 }
5500 #define SPEC_in1_r2_sr32 0
5501
5502 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5503 {
5504 o->in1 = tcg_temp_new_i64();
5505 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5506 }
5507 #define SPEC_in1_r2_32u 0
5508
5509 static void in1_r3(DisasContext *s, DisasOps *o)
5510 {
5511 o->in1 = load_reg(get_field(s, r3));
5512 }
5513 #define SPEC_in1_r3 0
5514
5515 static void in1_r3_o(DisasContext *s, DisasOps *o)
5516 {
5517 o->in1 = regs[get_field(s, r3)];
5518 }
5519 #define SPEC_in1_r3_o 0
5520
5521 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5522 {
5523 o->in1 = tcg_temp_new_i64();
5524 tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5525 }
5526 #define SPEC_in1_r3_32s 0
5527
5528 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5529 {
5530 o->in1 = tcg_temp_new_i64();
5531 tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5532 }
5533 #define SPEC_in1_r3_32u 0
5534
5535 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5536 {
5537 int r3 = get_field(s, r3);
5538 o->in1 = tcg_temp_new_i64();
5539 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5540 }
5541 #define SPEC_in1_r3_D32 SPEC_r3_even
5542
5543 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5544 {
5545 o->in1 = tcg_temp_new_i64();
5546 tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5547 }
5548 #define SPEC_in1_r3_sr32 0
5549
5550 static void in1_e1(DisasContext *s, DisasOps *o)
5551 {
5552 o->in1 = load_freg32_i64(get_field(s, r1));
5553 }
5554 #define SPEC_in1_e1 0
5555
5556 static void in1_f1(DisasContext *s, DisasOps *o)
5557 {
5558 o->in1 = load_freg(get_field(s, r1));
5559 }
5560 #define SPEC_in1_f1 0
5561
5562 static void in1_x1(DisasContext *s, DisasOps *o)
5563 {
5564 o->in1_128 = load_freg_128(get_field(s, r1));
5565 }
5566 #define SPEC_in1_x1 SPEC_r1_f128
5567
5568 /* Load the high double word of an extended (128-bit) format FP number */
5569 static void in1_x2h(DisasContext *s, DisasOps *o)
5570 {
5571 o->in1 = load_freg(get_field(s, r2));
5572 }
5573 #define SPEC_in1_x2h SPEC_r2_f128
5574
5575 static void in1_f3(DisasContext *s, DisasOps *o)
5576 {
5577 o->in1 = load_freg(get_field(s, r3));
5578 }
5579 #define SPEC_in1_f3 0
5580
5581 static void in1_la1(DisasContext *s, DisasOps *o)
5582 {
5583 o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5584 }
5585 #define SPEC_in1_la1 0
5586
5587 static void in1_la2(DisasContext *s, DisasOps *o)
5588 {
5589 int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5590 o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5591 }
5592 #define SPEC_in1_la2 0
5593
5594 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5595 {
5596 in1_la1(s, o);
5597 o->in1 = tcg_temp_new_i64();
5598 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
5599 }
5600 #define SPEC_in1_m1_8u 0
5601
5602 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5603 {
5604 in1_la1(s, o);
5605 o->in1 = tcg_temp_new_i64();
5606 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
5607 }
5608 #define SPEC_in1_m1_16s 0
5609
5610 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5611 {
5612 in1_la1(s, o);
5613 o->in1 = tcg_temp_new_i64();
5614 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
5615 }
5616 #define SPEC_in1_m1_16u 0
5617
5618 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5619 {
5620 in1_la1(s, o);
5621 o->in1 = tcg_temp_new_i64();
5622 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
5623 }
5624 #define SPEC_in1_m1_32s 0
5625
5626 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5627 {
5628 in1_la1(s, o);
5629 o->in1 = tcg_temp_new_i64();
5630 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
5631 }
5632 #define SPEC_in1_m1_32u 0
5633
5634 static void in1_m1_64(DisasContext *s, DisasOps *o)
5635 {
5636 in1_la1(s, o);
5637 o->in1 = tcg_temp_new_i64();
5638 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
5639 }
5640 #define SPEC_in1_m1_64 0
5641
5642 /* ====================================================================== */
5643 /* The "INput 2" generators. These load the second operand to an insn. */
5644
5645 static void in2_r1_o(DisasContext *s, DisasOps *o)
5646 {
5647 o->in2 = regs[get_field(s, r1)];
5648 }
5649 #define SPEC_in2_r1_o 0
5650
5651 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5652 {
5653 o->in2 = tcg_temp_new_i64();
5654 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5655 }
5656 #define SPEC_in2_r1_16u 0
5657
5658 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5659 {
5660 o->in2 = tcg_temp_new_i64();
5661 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5662 }
5663 #define SPEC_in2_r1_32u 0
5664
5665 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5666 {
5667 int r1 = get_field(s, r1);
5668 o->in2 = tcg_temp_new_i64();
5669 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5670 }
5671 #define SPEC_in2_r1_D32 SPEC_r1_even
5672
5673 static void in2_r2(DisasContext *s, DisasOps *o)
5674 {
5675 o->in2 = load_reg(get_field(s, r2));
5676 }
5677 #define SPEC_in2_r2 0
5678
5679 static void in2_r2_o(DisasContext *s, DisasOps *o)
5680 {
5681 o->in2 = regs[get_field(s, r2)];
5682 }
5683 #define SPEC_in2_r2_o 0
5684
5685 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5686 {
5687 int r2 = get_field(s, r2);
5688 if (r2 != 0) {
5689 o->in2 = load_reg(r2);
5690 }
5691 }
5692 #define SPEC_in2_r2_nz 0
5693
5694 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5695 {
5696 o->in2 = tcg_temp_new_i64();
5697 tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5698 }
5699 #define SPEC_in2_r2_8s 0
5700
5701 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5702 {
5703 o->in2 = tcg_temp_new_i64();
5704 tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5705 }
5706 #define SPEC_in2_r2_8u 0
5707
5708 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5709 {
5710 o->in2 = tcg_temp_new_i64();
5711 tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5712 }
5713 #define SPEC_in2_r2_16s 0
5714
5715 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5716 {
5717 o->in2 = tcg_temp_new_i64();
5718 tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5719 }
5720 #define SPEC_in2_r2_16u 0
5721
5722 static void in2_r3(DisasContext *s, DisasOps *o)
5723 {
5724 o->in2 = load_reg(get_field(s, r3));
5725 }
5726 #define SPEC_in2_r3 0
5727
5728 static void in2_r3_D64(DisasContext *s, DisasOps *o)
5729 {
5730 int r3 = get_field(s, r3);
5731 o->in2_128 = tcg_temp_new_i128();
5732 tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
5733 }
5734 #define SPEC_in2_r3_D64 SPEC_r3_even
5735
5736 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5737 {
5738 o->in2 = tcg_temp_new_i64();
5739 tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5740 }
5741 #define SPEC_in2_r3_sr32 0
5742
5743 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5744 {
5745 o->in2 = tcg_temp_new_i64();
5746 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5747 }
5748 #define SPEC_in2_r3_32u 0
5749
5750 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5751 {
5752 o->in2 = tcg_temp_new_i64();
5753 tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5754 }
5755 #define SPEC_in2_r2_32s 0
5756
5757 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5758 {
5759 o->in2 = tcg_temp_new_i64();
5760 tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5761 }
5762 #define SPEC_in2_r2_32u 0
5763
5764 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5765 {
5766 o->in2 = tcg_temp_new_i64();
5767 tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5768 }
5769 #define SPEC_in2_r2_sr32 0
5770
5771 static void in2_e2(DisasContext *s, DisasOps *o)
5772 {
5773 o->in2 = load_freg32_i64(get_field(s, r2));
5774 }
5775 #define SPEC_in2_e2 0
5776
5777 static void in2_f2(DisasContext *s, DisasOps *o)
5778 {
5779 o->in2 = load_freg(get_field(s, r2));
5780 }
5781 #define SPEC_in2_f2 0
5782
5783 static void in2_x2(DisasContext *s, DisasOps *o)
5784 {
5785 o->in2_128 = load_freg_128(get_field(s, r2));
5786 }
5787 #define SPEC_in2_x2 SPEC_r2_f128
5788
5789 /* Load the low double word of an extended (128-bit) format FP number */
5790 static void in2_x2l(DisasContext *s, DisasOps *o)
5791 {
5792 o->in2 = load_freg(get_field(s, r2) + 2);
5793 }
5794 #define SPEC_in2_x2l SPEC_r2_f128
5795
5796 static void in2_ra2(DisasContext *s, DisasOps *o)
5797 {
5798 int r2 = get_field(s, r2);
5799
5800 /* Note: *don't* treat !r2 as 0, use the reg value. */
5801 o->in2 = tcg_temp_new_i64();
5802 gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5803 }
5804 #define SPEC_in2_ra2 0
5805
5806 static void in2_ra2_E(DisasContext *s, DisasOps *o)
5807 {
5808 return in2_ra2(s, o);
5809 }
5810 #define SPEC_in2_ra2_E SPEC_r2_even
5811
5812 static void in2_a2(DisasContext *s, DisasOps *o)
5813 {
5814 int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5815 o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5816 }
5817 #define SPEC_in2_a2 0
5818
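/* Compute the address of a PC-relative second operand: for an
   immediate, pc + 2 * (signed) i2; otherwise whatever non-immediate
   value disas_jdest produced. */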
5819 static TCGv gen_ri2(DisasContext *s)
5820 {
5821 TCGv ri2 = NULL;
5822 bool is_imm;
5823 int imm;
5824
5825 disas_jdest(s, i2, is_imm, imm, ri2);
5826 if (is_imm) {
5827 ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
5828 }
5829
5830 return ri2;
5831 }
5832
5833 static void in2_ri2(DisasContext *s, DisasOps *o)
5834 {
5835 o->in2 = gen_ri2(s);
5836 }
5837 #define SPEC_in2_ri2 0
5838
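/* Shift-count operand: the low 6 bits of the second-operand address,
   or of d2 alone when no base register is specified. */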
5839 static void in2_sh(DisasContext *s, DisasOps *o)
5840 {
5841 int b2 = get_field(s, b2);
5842 int d2 = get_field(s, d2);
5843
5844 if (b2 == 0) {
5845 o->in2 = tcg_constant_i64(d2 & 0x3f);
5846 } else {
5847 o->in2 = get_address(s, 0, b2, d2);
5848 tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5849 }
5850 }
5851 #define SPEC_in2_sh 0
5852
5853 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5854 {
5855 in2_a2(s, o);
5856 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
5857 }
5858 #define SPEC_in2_m2_8u 0
5859
5860 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5861 {
5862 in2_a2(s, o);
5863 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
5864 }
5865 #define SPEC_in2_m2_16s 0
5866
5867 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5868 {
5869 in2_a2(s, o);
5870 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
5871 }
5872 #define SPEC_in2_m2_16u 0
5873
5874 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5875 {
5876 in2_a2(s, o);
5877 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
5878 }
5879 #define SPEC_in2_m2_32s 0
5880
5881 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5882 {
5883 in2_a2(s, o);
5884 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
5885 }
5886 #define SPEC_in2_m2_32u 0
5887
5888 #ifndef CONFIG_USER_ONLY
5889 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5890 {
5891 in2_a2(s, o);
5892 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5893 }
5894 #define SPEC_in2_m2_32ua 0
5895 #endif
5896
5897 static void in2_m2_64(DisasContext *s, DisasOps *o)
5898 {
5899 in2_a2(s, o);
5900 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5901 }
5902 #define SPEC_in2_m2_64 0
5903
5904 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5905 {
5906 in2_a2(s, o);
5907 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5908 gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5909 }
5910 #define SPEC_in2_m2_64w 0
5911
5912 #ifndef CONFIG_USER_ONLY
5913 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5914 {
5915 in2_a2(s, o);
5916 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5917 }
5918 #define SPEC_in2_m2_64a 0
5919 #endif
5920
5921 static void in2_mri2_16s(DisasContext *s, DisasOps *o)
5922 {
5923 o->in2 = tcg_temp_new_i64();
5924 tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
5925 }
5926 #define SPEC_in2_mri2_16s 0
5927
5928 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
5929 {
5930 o->in2 = tcg_temp_new_i64();
5931 tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
5932 }
5933 #define SPEC_in2_mri2_16u 0
5934
5935 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
5936 {
5937 o->in2 = tcg_temp_new_i64();
5938 tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5939 MO_TESL | MO_ALIGN);
5940 }
5941 #define SPEC_in2_mri2_32s 0
5942
5943 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
5944 {
5945 o->in2 = tcg_temp_new_i64();
5946 tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5947 MO_TEUL | MO_ALIGN);
5948 }
5949 #define SPEC_in2_mri2_32u 0
5950
5951 static void in2_mri2_64(DisasContext *s, DisasOps *o)
5952 {
5953 o->in2 = tcg_temp_new_i64();
5954 tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
5955 MO_TEUQ | MO_ALIGN);
5956 }
5957 #define SPEC_in2_mri2_64 0
5958
5959 static void in2_i2(DisasContext *s, DisasOps *o)
5960 {
5961 o->in2 = tcg_constant_i64(get_field(s, i2));
5962 }
5963 #define SPEC_in2_i2 0
5964
5965 static void in2_i2_8u(DisasContext *s, DisasOps *o)
5966 {
5967 o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
5968 }
5969 #define SPEC_in2_i2_8u 0
5970
5971 static void in2_i2_16u(DisasContext *s, DisasOps *o)
5972 {
5973 o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
5974 }
5975 #define SPEC_in2_i2_16u 0
5976
5977 static void in2_i2_32u(DisasContext *s, DisasOps *o)
5978 {
5979 o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
5980 }
5981 #define SPEC_in2_i2_32u 0
5982
5983 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
5984 {
5985 uint64_t i2 = (uint16_t)get_field(s, i2);
5986 o->in2 = tcg_constant_i64(i2 << s->insn->data);
5987 }
5988 #define SPEC_in2_i2_16u_shl 0
5989
5990 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
5991 {
5992 uint64_t i2 = (uint32_t)get_field(s, i2);
5993 o->in2 = tcg_constant_i64(i2 << s->insn->data);
5994 }
5995 #define SPEC_in2_i2_32u_shl 0
5996
5997 #ifndef CONFIG_USER_ONLY
5998 static void in2_insn(DisasContext *s, DisasOps *o)
5999 {
6000 o->in2 = tcg_constant_i64(s->fields.raw_insn);
6001 }
6002 #define SPEC_in2_insn 0
6003 #endif
6004
6005 /* ====================================================================== */
6006
6007 /* Find opc within the table of insns. This is formulated as a switch
6008 statement so that (1) we get compile-time notice of cut-paste errors
6009 for duplicated opcodes, and (2) the compiler generates the binary
6010 search tree, rather than us having to post-process the table. */
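/* Each line of insn-data.h.inc is expanded three times with
   different definitions of E(): first into the DisasInsnEnum
   enumerators, then into the insn_info[] initializers, and finally
   into the case labels of lookup_opc() below. */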
6011
6012 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6013 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6014
6015 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6016 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6017
6018 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6019 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6020
6021 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6022
6023 enum DisasInsnEnum {
6024 #include "insn-data.h.inc"
6025 };
6026
6027 #undef E
6028 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6029 .opc = OPC, \
6030 .flags = FL, \
6031 .fmt = FMT_##FT, \
6032 .fac = FAC_##FC, \
6033 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6034 .name = #NM, \
6035 .help_in1 = in1_##I1, \
6036 .help_in2 = in2_##I2, \
6037 .help_prep = prep_##P, \
6038 .help_wout = wout_##W, \
6039 .help_cout = cout_##CC, \
6040 .help_op = op_##OP, \
6041 .data = D \
6042 },
6043
6044 /* Allow 0 to be used for NULL in the table below. */
6045 #define in1_0 NULL
6046 #define in2_0 NULL
6047 #define prep_0 NULL
6048 #define wout_0 NULL
6049 #define cout_0 NULL
6050 #define op_0 NULL
6051
6052 #define SPEC_in1_0 0
6053 #define SPEC_in2_0 0
6054 #define SPEC_prep_0 0
6055 #define SPEC_wout_0 0
6056
6057 /* Give smaller names to the various facilities. */
6058 #define FAC_Z S390_FEAT_ZARCH
6059 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6060 #define FAC_DFP S390_FEAT_DFP
6061 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6062 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6063 #define FAC_EE S390_FEAT_EXECUTE_EXT
6064 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6065 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6066 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6067 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6068 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6069 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6070 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6071 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6072 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6073 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6074 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6075 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6076 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6077 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6078 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6079 #define FAC_SFLE S390_FEAT_STFLE
6080 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6081 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6082 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6083 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6084 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6085 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6086 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6087 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6088 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6089 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6090 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6091 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6092 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6093 #define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6094 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6095 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6096 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6097 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6098 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6099 #define FAC_VE2 S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6100 #define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6101 #define FAC_MIE3 S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6102
6103 static const DisasInsn insn_info[] = {
6104 #include "insn-data.h.inc"
6105 };
6106
6107 #undef E
6108 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6109 case OPC: return &insn_info[insn_ ## NM];
6110
6111 static const DisasInsn *lookup_opc(uint16_t opc)
6112 {
6113 switch (opc) {
6114 #include "insn-data.h.inc"
6115 default:
6116 return NULL;
6117 }
6118 }
6119
6120 #undef F
6121 #undef E
6122 #undef D
6123 #undef C
6124
6125 /* Extract a field from the insn. The INSN should be left-aligned in
6126 the uint64_t so that we can more easily utilize the big-bit-endian
6127 definitions we extract from the Principles of Operation. */
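/*
 * Worked example (illustrative): a 4-bit register field beginning at
 * bit 8 of the left-aligned insn is extracted below as
 *
 *     r = (insn << 8) >> (64 - 4);
 *
 * i.e. shift the field up to the MSB, then back down into the low
 * bits, so the PoO's left-to-right bit numbering can be used unchanged.
 */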
6128
6129 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6130 {
6131 uint32_t r, m;
6132
6133 if (f->size == 0) {
6134 return;
6135 }
6136
6137 /* Zero extract the field from the insn. */
6138 r = (insn << f->beg) >> (64 - f->size);
6139
6140 /* Sign-extend, or un-swap the field as necessary. */
6141 switch (f->type) {
6142 case 0: /* unsigned */
6143 break;
6144 case 1: /* signed */
6145 assert(f->size <= 32);
6146 m = 1u << (f->size - 1);
6147 r = (r ^ m) - m;
6148 break;
6149 case 2: /* dl+dh split, signed 20 bit. */
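/*
 * The raw 20-bit extraction yields DL in the upper 12 bits of r and
 * DH in the low 8 bits; reassemble as sign-extend(DH):DL to form the
 * signed 20-bit displacement.
 */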
6150 r = ((int8_t)r << 12) | (r >> 8);
6151 break;
6152 case 3: /* MSB stored in RXB */
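/*
 * Vector register designations are 5 bits wide, but only the low
 * 4 bits are encoded in place; the most-significant bit of each
 * operand lives in the RXB field (instruction bits 36-39), selected
 * below according to where the 4-bit field begins.
 */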
6153 g_assert(f->size == 4);
6154 switch (f->beg) {
6155 case 8:
6156 r |= extract64(insn, 63 - 36, 1) << 4;
6157 break;
6158 case 12:
6159 r |= extract64(insn, 63 - 37, 1) << 4;
6160 break;
6161 case 16:
6162 r |= extract64(insn, 63 - 38, 1) << 4;
6163 break;
6164 case 32:
6165 r |= extract64(insn, 63 - 39, 1) << 4;
6166 break;
6167 default:
6168 g_assert_not_reached();
6169 }
6170 break;
6171 default:
6172 abort();
6173 }
6174
6175 /*
6176 * Validate that the "compressed" encoding we selected above is valid.
6177 * I.e. we haven't made two different original fields overlap.
6178 */
6179 assert(((o->presentC >> f->indexC) & 1) == 0);
6180 o->presentC |= 1 << f->indexC;
6181 o->presentO |= 1 << f->indexO;
6182
6183 o->c[f->indexC] = r;
6184 }
6185
6186 /* Look up the insn at the current PC, extracting the operands into s->fields
6187 and returning the info struct for the insn. Returns NULL for an invalid insn. */
6188
6189 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6190 {
6191 uint64_t insn, pc = s->base.pc_next;
6192 int op, op2, ilen;
6193 const DisasInsn *info;
6194
6195 if (unlikely(s->ex_value)) {
6196 /* Drop the EX data now, so that it's clear on exception paths. */
6197 tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
6198 offsetof(CPUS390XState, ex_value));
6199
6200 /* Extract the values saved by EXECUTE. */
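/* ex_value holds the insn bytes left-aligned in the upper six bytes
   and the instruction length in the low nibble. */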
6201 insn = s->ex_value & 0xffffffffffff0000ull;
6202 ilen = s->ex_value & 0xf;
6203
6204 /* Register insn bytes with translator so plugins work. */
6205 for (int i = 0; i < ilen; i++) {
6206 uint8_t byte = extract64(insn, 56 - (i * 8), 8);
6207 translator_fake_ldb(byte, pc + i);
6208 }
6209 op = insn >> 56;
6210 } else {
6211 insn = ld_code2(env, s, pc);
6212 op = (insn >> 8) & 0xff;
6213 ilen = get_ilen(op);
6214 switch (ilen) {
6215 case 2:
6216 insn = insn << 48;
6217 break;
6218 case 4:
6219 insn = ld_code4(env, s, pc) << 32;
6220 break;
6221 case 6:
6222 insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6223 break;
6224 default:
6225 g_assert_not_reached();
6226 }
6227 }
6228 s->pc_tmp = s->base.pc_next + ilen;
6229 s->ilen = ilen;
6230
6231 /* We can't actually determine the insn format until we've looked up
6232 the full insn opcode, which in turn requires locating the
6233 secondary opcode. Assume by default that OP2 is at bit 40; for
6234 the smaller insns that don't actually have a secondary opcode,
6235 this will correctly result in OP2 = 0. */
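/*
 * With the insn left-aligned in 64 bits, "(insn << 40) >> 56" isolates
 * bits 40-47; similarly, "(insn << 8) >> 56" below picks OP2 out of
 * bits 8-15, and "(insn << 12) >> 60" picks a 4-bit OP2 out of
 * bits 12-15.
 */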
6236 switch (op) {
6237 case 0x01: /* E */
6238 case 0x80: /* S */
6239 case 0x82: /* S */
6240 case 0x93: /* S */
6241 case 0xb2: /* S, RRF, RRE, IE */
6242 case 0xb3: /* RRE, RRD, RRF */
6243 case 0xb9: /* RRE, RRF */
6244 case 0xe5: /* SSE, SIL */
6245 op2 = (insn << 8) >> 56;
6246 break;
6247 case 0xa5: /* RI */
6248 case 0xa7: /* RI */
6249 case 0xc0: /* RIL */
6250 case 0xc2: /* RIL */
6251 case 0xc4: /* RIL */
6252 case 0xc6: /* RIL */
6253 case 0xc8: /* SSF */
6254 case 0xcc: /* RIL */
6255 op2 = (insn << 12) >> 60;
6256 break;
6257 case 0xc5: /* MII */
6258 case 0xc7: /* SMI */
6259 case 0xd0 ... 0xdf: /* SS */
6260 case 0xe1: /* SS */
6261 case 0xe2: /* SS */
6262 case 0xe8: /* SS */
6263 case 0xe9: /* SS */
6264 case 0xea: /* SS */
6265 case 0xee ... 0xf3: /* SS */
6266 case 0xf8 ... 0xfd: /* SS */
6267 op2 = 0;
6268 break;
6269 default:
6270 op2 = (insn << 40) >> 56;
6271 break;
6272 }
6273
6274 memset(&s->fields, 0, sizeof(s->fields));
6275 s->fields.raw_insn = insn;
6276 s->fields.op = op;
6277 s->fields.op2 = op2;
6278
6279 /* Lookup the instruction. */
6280 info = lookup_opc(op << 8 | op2);
6281 s->insn = info;
6282
6283 /* If we found it, extract the operands. */
6284 if (info != NULL) {
6285 DisasFormat fmt = info->fmt;
6286 int i;
6287
6288 for (i = 0; i < NUM_C_FIELD; ++i) {
6289 extract_field(&s->fields, &format_info[fmt].op[i], insn);
6290 }
6291 }
6292 return info;
6293 }
6294
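/* FPRs 0, 2, 4 and 6 are always usable; any other FPR is an
   additional (AFP) register, gated by the AFP-register control. */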
6295 static bool is_afp_reg(int reg)
6296 {
6297 return reg % 2 || reg > 6;
6298 }
6299
6300 static bool is_fp_pair(int reg)
6301 {
6302 /* Valid pairs start at 0,1,4,5,8,9,12,13; to exclude the others, check that bit 1 is clear. */
6303 return !(reg & 0x2);
6304 }
6305
6306 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6307 {
6308 const DisasInsn *insn;
6309 DisasJumpType ret = DISAS_NEXT;
6310 DisasOps o = {};
6311 bool icount = false;
6312
6313 /* Search for the insn in the table. */
6314 insn = extract_insn(env, s);
6315
6316 /* Update insn_start now that we know the ILEN. */
6317 tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
6318
6319 /* Not found means unimplemented/illegal opcode. */
6320 if (insn == NULL) {
6321 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6322 s->fields.op, s->fields.op2);
6323 gen_illegal_opcode(s);
6324 ret = DISAS_NORETURN;
6325 goto out;
6326 }
6327
6328 #ifndef CONFIG_USER_ONLY
6329 if (s->base.tb->flags & FLAG_MASK_PER) {
6330 TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
6331 gen_helper_per_ifetch(tcg_env, addr);
6332 }
6333 #endif
6334
6335 /* process flags */
6336 if (insn->flags) {
6337 /* privileged instruction */
6338 if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6339 gen_program_exception(s, PGM_PRIVILEGED);
6340 ret = DISAS_NORETURN;
6341 goto out;
6342 }
6343
6344 /* If AFP is not enabled, AFP instructions and registers are forbidden. */
6345 if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6346 uint8_t dxc = 0;
6347
6348 if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6349 dxc = 1;
6350 }
6351 if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6352 dxc = 1;
6353 }
6354 if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6355 dxc = 1;
6356 }
6357 if (insn->flags & IF_BFP) {
6358 dxc = 2;
6359 }
6360 if (insn->flags & IF_DFP) {
6361 dxc = 3;
6362 }
6363 if (insn->flags & IF_VEC) {
6364 dxc = 0xfe;
6365 }
6366 if (dxc) {
6367 gen_data_exception(dxc);
6368 ret = DISAS_NORETURN;
6369 goto out;
6370 }
6371 }
6372
6373 /* If vector instructions are not enabled, executing them is forbidden. */
6374 if (insn->flags & IF_VEC) {
6375 if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
6376 gen_data_exception(0xfe);
6377 ret = DISAS_NORETURN;
6378 goto out;
6379 }
6380 }
6381
6382 /* Input/output instructions are the special case for icount mode. */
6383 if (unlikely(insn->flags & IF_IO)) {
6384 icount = translator_io_start(&s->base);
6385 }
6386 }
6387
6388 /* Check for insn specification exceptions. */
6389 if (insn->spec) {
6390 if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6391 (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6392 (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6393 (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6394 (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6395 gen_program_exception(s, PGM_SPECIFICATION);
6396 ret = DISAS_NORETURN;
6397 goto out;
6398 }
6399 }
6400
6401 /* Implement the instruction. */
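/*
 * The helpers run as a fixed pipeline: load the inputs (in1, in2),
 * prepare the output (prep), perform the operation (op), then write
 * the result back (wout) and compute the condition code (cout).
 * wout/cout are skipped when op has already ended the TB.
 */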
6402 if (insn->help_in1) {
6403 insn->help_in1(s, &o);
6404 }
6405 if (insn->help_in2) {
6406 insn->help_in2(s, &o);
6407 }
6408 if (insn->help_prep) {
6409 insn->help_prep(s, &o);
6410 }
6411 if (insn->help_op) {
6412 ret = insn->help_op(s, &o);
6413 }
6414 if (ret != DISAS_NORETURN) {
6415 if (insn->help_wout) {
6416 insn->help_wout(s, &o);
6417 }
6418 if (insn->help_cout) {
6419 insn->help_cout(s, &o);
6420 }
6421 }
6422
6423 /* An I/O instruction must be the last in the TB when icount is enabled. */
6424 if (unlikely(icount && ret == DISAS_NEXT)) {
6425 ret = DISAS_TOO_MANY;
6426 }
6427
6428 #ifndef CONFIG_USER_ONLY
6429 if (s->base.tb->flags & FLAG_MASK_PER) {
6430 /* An exception might be triggered; save the PSW if not already done. */
6431 if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
6432 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6433 }
6434
6435 /* Call the helper to check for a possible PER exception. */
6436 gen_helper_per_check_exception(tcg_env);
6437 }
6438 #endif
6439
6440 out:
6441 /* Advance to the next instruction. */
6442 s->base.pc_next = s->pc_tmp;
6443 return ret;
6444 }
6445
6446 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6447 {
6448 DisasContext *dc = container_of(dcbase, DisasContext, base);
6449
6450 /* 31-bit mode */
6451 if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6452 dc->base.pc_first &= 0x7fffffff;
6453 dc->base.pc_next = dc->base.pc_first;
6454 }
6455
6456 dc->cc_op = CC_OP_DYNAMIC;
6457 dc->ex_value = dc->base.tb->cs_base;
6458 dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6459 }
6460
6461 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6462 {
6463 }
6464
6465 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6466 {
6467 DisasContext *dc = container_of(dcbase, DisasContext, base);
6468
6469 /* Delay setting the ilen until we've read the insn. */
6470 tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6471 dc->insn_start = tcg_last_op();
6472 }
6473
6474 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6475 uint64_t pc)
6476 {
6477 uint64_t insn = cpu_lduw_code(env, pc);
6478
6479 return pc + get_ilen((insn >> 8) & 0xff);
6480 }
6481
6482 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6483 {
6484 CPUS390XState *env = cpu_env(cs);
6485 DisasContext *dc = container_of(dcbase, DisasContext, base);
6486
6487 dc->base.is_jmp = translate_one(env, dc);
6488 if (dc->base.is_jmp == DISAS_NEXT) {
6489 if (dc->ex_value ||
6490 !is_same_page(dcbase, dc->base.pc_next) ||
6491 !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6492 dc->base.is_jmp = DISAS_TOO_MANY;
6493 }
6494 }
6495 }
6496
6497 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6498 {
6499 DisasContext *dc = container_of(dcbase, DisasContext, base);
6500
6501 switch (dc->base.is_jmp) {
6502 case DISAS_NORETURN:
6503 break;
6504 case DISAS_TOO_MANY:
6505 update_psw_addr(dc);
6506 /* FALLTHRU */
6507 case DISAS_PC_UPDATED:
6508 /* The next TB starts off with CC_OP_DYNAMIC, so make sure the
6509 cc op type is in env. */
6510 update_cc_op(dc);
6511 /* FALLTHRU */
6512 case DISAS_PC_CC_UPDATED:
6513 /* Exit the TB, either by raising a debug exception or by return. */
6514 if (dc->exit_to_mainloop) {
6515 tcg_gen_exit_tb(NULL, 0);
6516 } else {
6517 tcg_gen_lookup_and_goto_ptr();
6518 }
6519 break;
6520 default:
6521 g_assert_not_reached();
6522 }
6523 }
6524
6525 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6526 CPUState *cs, FILE *logfile)
6527 {
6528 DisasContext *dc = container_of(dcbase, DisasContext, base);
6529
6530 if (unlikely(dc->ex_value)) {
6531 /* ??? Unfortunately target_disas can't use host memory. */
6532 fprintf(logfile, "IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
6533 } else {
6534 fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6535 target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6536 }
6537 }
6538
6539 static const TranslatorOps s390x_tr_ops = {
6540 .init_disas_context = s390x_tr_init_disas_context,
6541 .tb_start = s390x_tr_tb_start,
6542 .insn_start = s390x_tr_insn_start,
6543 .translate_insn = s390x_tr_translate_insn,
6544 .tb_stop = s390x_tr_tb_stop,
6545 .disas_log = s390x_tr_disas_log,
6546 };
6547
6548 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
6549 target_ulong pc, void *host_pc)
6550 {
6551 DisasContext dc;
6552
6553 translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6554 }
6555
6556 void s390x_restore_state_to_opc(CPUState *cs,
6557 const TranslationBlock *tb,
6558 const uint64_t *data)
6559 {
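/*
 * The entries mirror the tcg_gen_insn_start() arguments emitted in
 * s390x_tr_insn_start(): data[0] is the PSW address, data[1] the cc
 * op, and data[2] the instruction length that translate_one() patched
 * in via tcg_set_insn_start_param().
 */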
6560 S390CPU *cpu = S390_CPU(cs);
6561 CPUS390XState *env = &cpu->env;
6562 int cc_op = data[1];
6563
6564 env->psw.addr = data[0];
6565
6566 /* Update the CC opcode if it is not already up-to-date. */
6567 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6568 env->cc_op = cc_op;
6569 }
6570
6571 /* Record ILEN. */
6572 env->int_pgm_ilen = data[2];
6573 }