]> git.proxmox.com Git - mirror_qemu.git/blob - target/s390x/translate.c
tests/acceptance: Count multiple Tux logos displayed on framebuffer
[mirror_qemu.git] / target / s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44
45 #include "trace-tcg.h"
46 #include "exec/translator.h"
47 #include "exec/log.h"
48 #include "qemu/atomic128.h"
49
50
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext;
53 typedef struct DisasInsn DisasInsn;
54 typedef struct DisasFields DisasFields;
55
56 /*
57 * Define a structure to hold the decoded fields. We'll store each inside
58 * an array indexed by an enum. In order to conserve memory, we'll arrange
59 * for fields that do not exist at the same time to overlap, thus the "C"
60 * for compact. For checking purposes there is an "O" for original index
61 * as well that will be applied to availability bitmaps.
62 */
63
/* Original ("O") field indices, one per distinct instruction field name.
   Used as bit positions in DisasFields.presentO availability bitmap. */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

/* Compact ("C") field indices.  Fields that can never appear in the same
   instruction format share a slot so DisasFields.c[] stays small. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
129
/* Decoded operand fields of the current instruction. */
struct DisasFields {
    uint64_t raw_insn;      /* raw instruction bytes */
    unsigned op:8;          /* primary opcode byte */
    unsigned op2:8;         /* secondary opcode byte, where the format has one */
    unsigned presentC:16;   /* bitmap of valid compact (C) indices */
    unsigned int presentO;  /* bitmap of valid original (O) indices */
    int c[NUM_C_FIELD];     /* field values, indexed by compact index */
};

/* Per-translation-block state threaded through every insn translator. */
struct DisasContext {
    DisasContextBase base;  /* common translator state (pc_next, tb, ...) */
    const DisasInsn *insn;  /* decode-table entry for the current insn */
    DisasFields fields;     /* decoded operand fields of the current insn */
    uint64_t ex_value;      /* presumably the EXECUTE target image — see
                               translate_one for how it is consumed */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;          /* length in bytes of the current insn */
    enum cc_op cc_op;       /* how the condition code is currently encoded */
    bool do_debug;
};
154
155 /* Information carried about a condition to be evaluated. */
/* Information carried about a condition to be evaluated: the TCG condition
   plus its two operands.  g1/g2 flag operands that are TCG globals and
   therefore must not be freed by free_compare(). */
typedef struct {
    TCGCond cond:8;
    bool is_64;             /* operands are i64 (u.s64) rather than i32 (u.s32) */
    bool g1;                /* first operand is a global, do not free */
    bool g2;                /* second operand is a global, do not free */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
166
167 #ifdef DEBUG_INLINE_BRANCHES
168 static uint64_t inline_branch_hit[CC_OP_MAX];
169 static uint64_t inline_branch_miss[CC_OP_MAX];
170 #endif
171
172 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
173 {
174 TCGv_i64 tmp;
175
176 if (s->base.tb->flags & FLAG_MASK_32) {
177 if (s->base.tb->flags & FLAG_MASK_64) {
178 tcg_gen_movi_i64(out, pc);
179 return;
180 }
181 pc |= 0x80000000;
182 }
183 assert(!(s->base.tb->flags & FLAG_MASK_64));
184 tmp = tcg_const_i64(pc);
185 tcg_gen_deposit_i64(out, out, tmp, 0, 32);
186 tcg_temp_free_i64(tmp);
187 }
188
189 static TCGv_i64 psw_addr;
190 static TCGv_i64 psw_mask;
191 static TCGv_i64 gbea;
192
193 static TCGv_i32 cc_op;
194 static TCGv_i64 cc_src;
195 static TCGv_i64 cc_dst;
196 static TCGv_i64 cc_vr;
197
198 static char cpu_reg_names[16][4];
199 static TCGv_i64 regs[16];
200
/* Allocate the TCG globals backed by CPUS390XState fields: the PSW,
   the breaking-event address, the cc computation state, and r0-r15.
   Called once at accelerator initialization. */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* The names must outlive the globals, hence the static array. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
231
/* Return the env offset of the full 16-byte vector register REG (0..31). */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
237
/* Return the env offset of element ENR (of size ES) within vector REG. */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    /* On LE hosts, flip the element position within its 8-byte half. */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
272
/* Return the env offset of the 64-bit FP register REG (element 0 of the
   corresponding vector register). */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}
278
/* Return the env offset of the 32-bit (short) FP register REG. */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
284
285 static TCGv_i64 load_reg(int reg)
286 {
287 TCGv_i64 r = tcg_temp_new_i64();
288 tcg_gen_mov_i64(r, regs[reg]);
289 return r;
290 }
291
292 static TCGv_i64 load_freg(int reg)
293 {
294 TCGv_i64 r = tcg_temp_new_i64();
295
296 tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
297 return r;
298 }
299
300 static TCGv_i64 load_freg32_i64(int reg)
301 {
302 TCGv_i64 r = tcg_temp_new_i64();
303
304 tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
305 return r;
306 }
307
/* Write V to general register REG (all 64 bits). */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
312
/* Write V to the 64-bit FP register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}
317
/* Write the low 32 bits of V to general register REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
323
/* Write the low 32 bits of V to the HIGH half of general register REG,
   keeping the low half intact. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
328
/* Write the low 32 bits of V to the 32-bit FP register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}
333
/* Copy env->retxl into DEST — the low 64 bits of a 128-bit value
   returned by a helper. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
338
/* Sync the psw.addr global with the address of the current instruction. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
344
/*
 * Record a taken branch at the current instruction (system emulation only).
 * The breaking-event address (gbea) is always updated; the PER branch
 * helper runs only when PER tracing is enabled in the TB flags.  With
 * TO_NEXT, the branch target passed to the helper is the next sequential
 * address (s->pc_tmp) instead of psw_addr.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            /* Only free the temp we allocated; psw_addr is a global. */
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
359
/*
 * As per_branch, but for a branch taken only when COND holds for
 * ARG1/ARG2 (system emulation only).  With PER enabled, the helper is
 * called under an inline branch; without PER, gbea is still kept
 * up to date via a conditional move.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the helper when the branch is NOT taken. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        /* No PER: just track the breaking-event address. */
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
379
/* Record the current instruction address as the breaking-event address. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
384
385 static void update_cc_op(DisasContext *s)
386 {
387 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
388 tcg_gen_movi_i32(cc_op, s->cc_op);
389 }
390 }
391
392 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
393 {
394 return (uint64_t)cpu_lduw_code(env, pc);
395 }
396
397 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
398 {
399 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
400 }
401
/* Return the MMU index for memory accesses of the current TB, derived
   from the DAT enable bit and the address-space-control bits. */
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    /* Without DAT, all accesses go through the real-address MMU. */
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        /* PSW_ASC_ACCREG is not a valid value here. */
        tcg_abort();
        break;
    }
#endif
}
424
425 static void gen_exception(int excp)
426 {
427 TCGv_i32 tmp = tcg_const_i32(excp);
428 gen_helper_exception(cpu_env, tmp);
429 tcg_temp_free_i32(tmp);
430 }
431
/*
 * Raise a program exception with the given CODE.  Stores the code and
 * the current instruction length into env for the exception handler,
 * syncs psw.addr and the cc state, then raises EXCP_PGM.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}
454
/* Raise an operation (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
459
460 static inline void gen_data_exception(uint8_t dxc)
461 {
462 TCGv_i32 tmp = tcg_const_i32(dxc);
463 gen_helper_data_exception(cpu_env, tmp);
464 tcg_temp_free_i32(tmp);
465 }
466
/* Raise a trap: a data exception with DXC 0xff. */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
472
473 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
474 int64_t imm)
475 {
476 tcg_gen_addi_i64(dst, src, imm);
477 if (!(s->base.tb->flags & FLAG_MASK_64)) {
478 if (s->base.tb->flags & FLAG_MASK_32) {
479 tcg_gen_andi_i64(dst, dst, 0x7fffffff);
480 } else {
481 tcg_gen_andi_i64(dst, dst, 0x00ffffff);
482 }
483 }
484 }
485
/*
 * Compute the effective address b2 + x2 + d2 into a fresh temporary,
 * wrapped to the current addressing mode.  A register number of 0 for
 * B2 or X2 means "no register" (r0 does not participate in address
 * generation).
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed. If we crop negative
     * displacements early we create larger immedate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* Displacement only: mask it to the addressing mode directly. */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
513
514 static inline bool live_cc_data(DisasContext *s)
515 {
516 return (s->cc_op != CC_OP_DYNAMIC
517 && s->cc_op != CC_OP_STATIC
518 && s->cc_op > 3);
519 }
520
/* Set the cc to the constant VAL (0..3) at translation time. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        /* The old cc data can no longer be consumed; let TCG drop it. */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
530
/* Set up a one-argument cc computation: OP applied to DST. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        /* cc_src/cc_vr are unused by one-argument ops; drop old data. */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
540
/* Set up a two-argument cc computation: OP applied to SRC and DST. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        /* cc_vr is unused by two-argument ops; drop the old value. */
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
551
/* Set up a three-argument cc computation: OP applied to SRC, DST, VR. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
560
/* cc = (VAL != 0), via the NZ cc op. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
565
/* Set the cc from a 32-bit float result VAL via the NZ_F32 cc op. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
570
/* Set the cc from a 64-bit float result VAL via the NZ_F64 cc op. */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
575
/* Set the cc from a 128-bit float result (VH:VL) via the NZ_F128 cc op. */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
580
/* CC value is in env->cc_op: mark the translation-time state as STATIC
   and drop any now-dead cc data. */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
591
/* calculates cc into cc_op: emit a call to the calc_cc helper with the
   right number of arguments for the pending cc op, then mark the cc
   state STATIC. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /* First pass: decide which constants the helper call needs.
       Three-argument ops need no dummy; everything else (default) does. */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed (constants / already computed). */
        break;
    }

    /* Second pass: emit the cc computation itself. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
699
700 static bool use_exit_tb(DisasContext *s)
701 {
702 return s->base.singlestep_enabled ||
703 (tb_cflags(s->base.tb) & CF_LAST_IO) ||
704 (s->base.tb->flags & FLAG_MASK_PER);
705 }
706
/* True if a direct goto_tb to DEST is permitted: not forced to exit,
   and (for system emulation) DEST lies on the same page as the TB start
   or the current instruction. */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
719
/* Debug statistics: count a branch that could not be inlined for CC_OP. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
726
/* Debug statistics: count a branch successfully inlined for CC_OP. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
733
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because the low mask bit (CC=3) is a
   don't-care. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
759
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where possible the
   comparison is expressed directly in terms of the pending cc data
   (inline branch); otherwise the cc is materialized first and the
   comparison is made against the 0..3 cc value (noninline branch). */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask of all-ones / all-zeroes: unconditional branch / no branch.
       The operands are irrelevant; use the cc_op global as a stand-in. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC. We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* Compare the materialized 0..3 cc value against the mask. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
1050
1051 static void free_compare(DisasCompare *c)
1052 {
1053 if (!c->g1) {
1054 if (c->is_64) {
1055 tcg_temp_free_i64(c->u.s64.a);
1056 } else {
1057 tcg_temp_free_i32(c->u.s32.a);
1058 }
1059 }
1060 if (!c->g2) {
1061 if (c->is_64) {
1062 tcg_temp_free_i64(c->u.s64.b);
1063 } else {
1064 tcg_temp_free_i32(c->u.s32.b);
1065 }
1066 }
1067 }
1068
1069 /* ====================================================================== */
1070 /* Define the insn format enumeration. */
1071 #define F0(N) FMT_##N,
1072 #define F1(N, X1) F0(N)
1073 #define F2(N, X1, X2) F0(N)
1074 #define F3(N, X1, X2, X3) F0(N)
1075 #define F4(N, X1, X2, X3, X4) F0(N)
1076 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1077 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
1078
1079 typedef enum {
1080 #include "insn-format.def"
1081 } DisasFormat;
1082
1083 #undef F0
1084 #undef F1
1085 #undef F2
1086 #undef F3
1087 #undef F4
1088 #undef F5
1089 #undef F6
1090
1091 /* This is the way fields are to be accessed out of DisasFields. */
1092 #define have_field(S, F) have_field1((S), FLD_O_##F)
1093 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1094
/* True if original field index C was decoded for the current insn. */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}
1099
/* Return the value of the field with original index O, stored at compact
   index C.  Asserts the field was actually decoded. */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
1106
1107 /* Describe the layout of each field in each format. */
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;          /* first bit position within the insn */
    unsigned int size:8;         /* field width in bits */
    unsigned int type:2;         /* extraction kind — from the R/I/BDL/V
                                    macros below: 0 plain, 1 immediate,
                                    2 long displacement, 3 vector;
                                    NOTE(review): confirm in extract_field */
    unsigned int indexC:6;       /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8;  /* original field identity (FLD_O_*) */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1119
1120 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1121 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1122 #define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
1123 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1124 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1125 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1126 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1127 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1128 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1129 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1130 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1131 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1132 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1133 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1134 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1135
1136 #define F0(N) { { } },
1137 #define F1(N, X1) { { X1 } },
1138 #define F2(N, X1, X2) { { X1, X2 } },
1139 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1140 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1141 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1142 #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
1143
1144 static const DisasFormatInfo format_info[] = {
1145 #include "insn-format.def"
1146 };
1147
1148 #undef F0
1149 #undef F1
1150 #undef F2
1151 #undef F3
1152 #undef F4
1153 #undef F5
1154 #undef F6
1155 #undef R
1156 #undef M
1157 #undef V
1158 #undef BD
1159 #undef BXD
1160 #undef BDL
1161 #undef BXDL
1162 #undef I
1163 #undef L
1164
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;  /* corresponding TCGv is a global,
                                          must not be freed */
    TCGv_i64 out, out2, in1, in2;      /* operand values */
    TCGv_i64 addr1;                    /* pre-computed effective address */
} DisasOps;
1173
1174 /* Instructions can place constraints on their operands, raising specification
1175 exceptions if they are violated. To make this easy to automate, each "in1",
1176 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1177 of the following, or 0. To make this easy to document, we'll put the
1178 SPEC_<name> defines next to <name>. */
1179
1180 #define SPEC_r1_even 1
1181 #define SPEC_r2_even 2
1182 #define SPEC_r3_even 4
1183 #define SPEC_r1_f128 8
1184 #define SPEC_r2_f128 16
1185
1186 /* Return values from translate_one, indicating the state of the TB. */
1187
1188 /* We are not using a goto_tb (for whatever reason), but have updated
1189 the PC (for whatever reason), so there's no need to do it again on
1190 exiting the TB. */
1191 #define DISAS_PC_UPDATED DISAS_TARGET_0
1192
1193 /* We have emitted one or more goto_tb. No fixup required. */
1194 #define DISAS_GOTO_TB DISAS_TARGET_1
1195
1196 /* We have updated the PC and CC values. */
1197 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1198
1199 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1200 updated the PC for the next instruction to be executed. */
1201 #define DISAS_PC_STALE DISAS_TARGET_3
1202
1203 /* We are exiting the TB to the main loop. */
1204 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
1205
1206
1207 /* Instruction flags */
1208 #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
1209 #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
1210 #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
1211 #define IF_BFP 0x0008 /* binary floating point instruction */
1212 #define IF_DFP 0x0010 /* decimal floating point instruction */
1213 #define IF_PRIV 0x0020 /* privileged instruction */
1214 #define IF_VEC 0x0040 /* vector instruction */
1215
struct DisasInsn {
    unsigned opc:16;            /* opcode, as matched by the decoder */
    unsigned flags:16;          /* IF_* instruction flags */
    DisasFormat fmt:8;          /* index into format_info[] */
    unsigned fac:8;             /* required facility */
    unsigned spec:8;            /* SPEC_* operand constraints */

    const char *name;           /* mnemonic, for tracing/logging */

    /* Pre-process arguments before HELP_OP. */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself. */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;              /* per-insn constant, meaning varies by op */
};
1242
1243 /* ====================================================================== */
1244 /* Miscellaneous helpers, used by several operations. */
1245
1246 static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
1247 {
1248 int b2 = get_field(s, b2);
1249 int d2 = get_field(s, d2);
1250
1251 if (b2 == 0) {
1252 o->in2 = tcg_const_i64(d2 & mask);
1253 } else {
1254 o->in2 = get_address(s, 0, b2, d2);
1255 tcg_gen_andi_i64(o->in2, o->in2, mask);
1256 }
1257 }
1258
/* Emit an unconditional branch to the known address DEST. */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        /* Branch to the next sequential insn: no PC update needed,
           but PER still observes a taken branch. */
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        /* Direct TB chaining is permitted for this target. */
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        /* Fall back to an indirect exit through psw_addr. */
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1278
/* Emit code for a conditional branch.  The target is either relative
   (IS_IMM, with IMM a signed halfword displacement from the insn) or
   the register/computed address CDEST.  C describes the condition under
   which the branch is taken; it is freed on all exit paths. */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        /* Never taken: a nop. */
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            /* Unconditional relative branch. */
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            /* Unconditional indirect branch. */
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    /* A genuinely conditional branch.  Three codegen strategies follow,
       depending on which exits may use direct TB chaining. */
    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            /* Select taken/not-taken address without branching. */
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison to 64 bits for movcond. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            /* cdest was allocated locally above in this case. */
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1409
1410 /* ====================================================================== */
1411 /* The operations. These perform the bulk of the work for any insn,
1412 usually after the operands have been loaded and output initialized. */
1413
/* Fixed-point absolute value. */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* Clear the sign bit of a 32-bit float image (absolute value). */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

/* Clear the sign bit of a 64-bit float image (absolute value). */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

/* 128-bit float absolute value: clear the sign bit in the high
   doubleword, pass the low doubleword through unchanged. */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

/* 64-bit addition; CC handling is done by the cout helper. */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1444
/* Add with carry: out = in1 + in2 + (carry extracted from the
   current CC state). */
static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the 0/1 result to 64 bits. */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}
1471
/* Add an immediate to a storage operand (ASI family).  With the
   STFLE_45 facility the memory update is done atomically; without it
   a plain load / add / store sequence is emitted. */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        /* Non-atomic path: fetch the old value... */
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        /* ...and store the sum back on the non-atomic path. */
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1492
/* BFP add, 32-bit operands, via helper. */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 64-bit operands, via helper. */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 128-bit operands.  The high half of the result comes back
   in o->out; the low half is retrieved from env via return_low128. */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
1511
/* 64-bit bitwise AND; CC handling is done by the cout helper. */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* AND an immediate into a sub-field of a register (NIxx family).
   s->insn->data packs the field position: low byte = bit shift,
   next byte = field width in bits. */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* Build the full-width mask in in2: immediate in the field,
       all-ones elsewhere, so the other bits pass through unchanged.
       in2 must be a private temp since we rewrite it. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1534
/* AND into storage (NI family); structure mirrors op_asi.  With the
   interlocked-access-2 facility the update is atomic, otherwise a
   plain load / and / store sequence is used. */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        /* Non-atomic path: fetch the old value... */
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        /* ...and store the result back on the non-atomic path. */
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1555
1556 static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1557 {
1558 pc_to_link_info(o->out, s, s->pc_tmp);
1559 if (o->in2) {
1560 tcg_gen_mov_i64(psw_addr, o->in2);
1561 per_branch(s, false);
1562 return DISAS_PC_UPDATED;
1563 } else {
1564 return DISAS_NEXT;
1565 }
1566 }
1567
/* Compose the link information saved by BAL.  In 31/64-bit mode this
   is the same as BAS; in 24-bit mode the high byte of the low word
   additionally encodes the ILC, PSW program-mask bits and the CC. */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        /* 31/64-bit addressing: plain BAS-style link info. */
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    /* 24-bit mode: CC must be concrete to be stored in the link word. */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    /* ILC (instruction length in halfwords) in bits 32-33, then the
       24-bit return address. */
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Copy the program-mask bits out of the PSW... */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* ...and merge in the condition code. */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
1588
1589 static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1590 {
1591 save_link_info(s, o);
1592 if (o->in2) {
1593 tcg_gen_mov_i64(psw_addr, o->in2);
1594 per_branch(s, false);
1595 return DISAS_PC_UPDATED;
1596 } else {
1597 return DISAS_NEXT;
1598 }
1599 }
1600
/* Branch-and-save with a relative immediate target: save the link
   info, then branch unconditionally via help_goto_direct. */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
}
1606
/* BRANCH ON CONDITION (BC/BCR/BRC...): branch if the CC matches the
   m1 mask.  The target is either the relative immediate i2 or the
   address in o->in2. */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    /* Turn the CC mask into a comparison and emit the branch. */
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1632
/* BRANCH ON COUNT (32-bit): decrement the low word of r1 and branch
   if the decremented value is non-zero. */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    /* Branch taken when the new count != 0; operands below are
       private temps, not globals. */
    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write the decremented count back to the low word of r1. */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1656
1657 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1658 {
1659 int r1 = get_field(s, r1);
1660 int imm = get_field(s, i2);
1661 DisasCompare c;
1662 TCGv_i64 t;
1663
1664 c.cond = TCG_COND_NE;
1665 c.is_64 = false;
1666 c.g1 = false;
1667 c.g2 = false;
1668
1669 t = tcg_temp_new_i64();
1670 tcg_gen_shri_i64(t, regs[r1], 32);
1671 tcg_gen_subi_i64(t, t, 1);
1672 store_reg32h_i64(r1, t);
1673 c.u.s32.a = tcg_temp_new_i32();
1674 c.u.s32.b = tcg_const_i32(0);
1675 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1676 tcg_temp_free_i64(t);
1677
1678 return help_branch(s, &c, 1, imm, o->in2);
1679 }
1680
/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch if the
   result is non-zero. */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* u.s64.a aliases the global regs[r1], hence g1 = true so it is
       not freed by free_compare. */
    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1699
/* BRANCH ON INDEX (32-bit, BXH/BXLE style): r1 += r3, then compare
   against the limit in r3|1.  s->insn->data selects the sense:
   non-zero -> branch on <= (LE), zero -> branch on > (GT). */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* The comparand lives in the odd register of the r3 pair. */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    /* Write the new index back to the low word of r1. */
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1725
/* BRANCH ON INDEX (64-bit): r1 += r3, then compare against the limit
   in r3|1.  s->insn->data selects LE vs GT as in op_bx32. */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* The addition below would clobber the comparand; take a
           private copy of it first. */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        /* Safe to compare against the global register directly. */
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1751
/* COMPARE AND BRANCH family: compare in1 with in2 using the relation
   encoded in m3, branching to either the relative immediate i4 or the
   b4/d4 address.  s->insn->data selects the unsigned (logical) form. */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    /* The comparands alias the caller-provided operands; mark them
       global so free_compare leaves them alone. */
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        /* Register/storage-target form: compute the branch address. */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1777
/* BFP compare, 32-bit operands: helper writes the CC directly. */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, 64-bit operands. */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, 128-bit operands (each passed as a pair of halves). */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1798
/* Validate and pack the m3 (rounding mode) and m4 modifier fields of a
   floating-point instruction into a single i32 (m3 in bits 0-3, m4 in
   bits 4-7) for passing to a helper.  Returns NULL after raising a
   specification exception if the rounding mode is invalid; the caller
   must then return DISAS_NORETURN. */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
1823
/*
 * Convert BFP to signed fixed point (CFxB/CGxB families).  Each stub
 * validates/packs the rounding-mode fields, calls the helper, and sets
 * the CC from the float source.  The float width is reflected by which
 * gen_set_cc_nz_f* is used; the fixed-point width by the helper name.
 */

/* Convert short BFP to 32-bit fixed. */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        /* Invalid rounding mode: specification exception was raised. */
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* Convert long BFP to 32-bit fixed. */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* Convert extended BFP (in1:in2 halves) to 32-bit fixed. */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Convert short BFP to 64-bit fixed. */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* Convert long BFP to 64-bit fixed. */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* Convert extended BFP to 64-bit fixed. */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
1901
/*
 * Convert BFP to unsigned (logical) fixed point (CLFxB/CLGxB families).
 * Same shape as the signed conversions above.
 */

/* Convert short BFP to 32-bit logical. */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        /* Invalid rounding mode: specification exception was raised. */
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* Convert long BFP to 32-bit logical. */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* Convert extended BFP (in1:in2 halves) to 32-bit logical. */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Convert short BFP to 64-bit logical. */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* Convert long BFP to 64-bit logical. */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* Convert extended BFP to 64-bit logical. */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
1979
/*
 * Convert fixed point to BFP (CxGB/CxLGB families).  Same shape as the
 * to-fixed conversions, but these do not set the CC.  The 128-bit
 * results come back split: high half in o->out, low half via
 * return_low128.
 */

/* Convert 64-bit fixed to short BFP. */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        /* Invalid rounding mode: specification exception was raised. */
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* Convert 64-bit fixed to long BFP. */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* Convert 64-bit fixed to extended BFP. */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* Convert 64-bit logical to short BFP. */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* Convert 64-bit logical to long BFP. */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* Convert 64-bit logical to extended BFP. */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2053
/* CHECKSUM: the helper computes the checksum (returned via the env
   low-128 mechanism into o->out) and the number of bytes consumed
   (into len); the r2/r2+1 address/length pair is then advanced. */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the operand address and reduce the remaining length. */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
2069
/* COMPARE LOGICAL (character): for power-of-two operand lengths up to
   8 the comparison is inlined as two loads plus an unsigned-compare CC;
   anything else goes through the byte-wise helper. */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;

    /* The l field encodes length - 1. */
    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: helper computes the CC directly. */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    /* Inlined cases: CC = unsigned comparison of the loaded values. */
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
2102
2103 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2104 {
2105 int r1 = get_field(s, r1);
2106 int r2 = get_field(s, r2);
2107 TCGv_i32 t1, t2;
2108
2109 /* r1 and r2 must be even. */
2110 if (r1 & 1 || r2 & 1) {
2111 gen_program_exception(s, PGM_SPECIFICATION);
2112 return DISAS_NORETURN;
2113 }
2114
2115 t1 = tcg_const_i32(r1);
2116 t2 = tcg_const_i32(r2);
2117 gen_helper_clcl(cc_op, cpu_env, t1, t2);
2118 tcg_temp_free_i32(t1);
2119 tcg_temp_free_i32(t2);
2120 set_cc_static(s);
2121 return DISAS_NEXT;
2122 }
2123
/* COMPARE LOGICAL LONG EXTENDED: helper-based, r1/r3 register pairs
   plus the padding operand in o->in2. */
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2144
/* COMPARE LOGICAL LONG UNICODE: same structure as op_clcle, with the
   Unicode helper. */
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2165
/* COMPARE LOGICAL CHARACTERS UNDER MASK: the helper compares the
   register bytes selected by m3 against storage at o->in2. */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    /* Only the low 32 bits of the register participate. */
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
2177
/* COMPARE LOGICAL STRING: helper-based; regs[0] is passed through to
   the helper (it holds the ending character per the ISA).  Updated
   operand addresses come back in o->in1 and, via low128, o->in2. */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
2185
2186 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2187 {
2188 TCGv_i64 t = tcg_temp_new_i64();
2189 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2190 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2191 tcg_gen_or_i64(o->out, o->out, t);
2192 tcg_temp_free_i64(t);
2193 return DISAS_NEXT;
2194 }
2195
/* COMPARE AND SWAP: atomic cmpxchg at the b2/d2 address, then derive
   the CC from whether the memory value matched the expected one. */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value). */

    addr = get_address(s, 0, b2, d2);
    /* Aligned access required; width comes from s->insn->data. */
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test. */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2220
/* COMPARE DOUBLE AND SWAP (128-bit): serial helper when not running in
   parallel, cmpxchg128-based helper when the host supports it, and an
   exit-to-serial-context fallback otherwise. */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        /* No host 128-bit cmpxchg: retry the insn serially. */
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
2250
/* COMPARE AND SWAP AND STORE: fully helper-based, with distinct
   serial and parallel-context implementations. */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
2266
2267 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (system mode only): cmpxchg at the
   operand address, then flush the TLB when the swap succeeded and
   the purge bit (LSB of R2) is set. */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask low address bits so the access is naturally aligned. */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal? */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2310 #endif
2311
2312 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2313 {
2314 TCGv_i64 t1 = tcg_temp_new_i64();
2315 TCGv_i32 t2 = tcg_temp_new_i32();
2316 tcg_gen_extrl_i64_i32(t2, o->in1);
2317 gen_helper_cvd(t1, t2);
2318 tcg_temp_free_i32(t2);
2319 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2320 tcg_temp_free_i64(t1);
2321 return DISAS_NEXT;
2322 }
2323
/* COMPARE AND TRAP: branch around the trap when the (inverted)
   condition holds, i.e. trap exactly when the m3 relation is true.
   s->insn->data selects the unsigned (logical) form. */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Inverted: the branch skips the trap. */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2342
/* Unicode conversion instructions (CUxy family).  s->insn->data
   encodes source/destination as a two-digit code (e.g. 12, 21, 24)
   selecting the matching helper; m3 is the well-formedness-check
   modifier, honored only with the ETF3 enhancement facility. */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even. */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        /* Without the facility the check modifier reads as zero. */
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
2392
2393 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE: hand r1, r3 and the function code (i2 field) to the helper,
   which implements the hypervisor/machine diagnose calls. */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
2407 #endif
2408
/* Fixed-point divide: each helper produces a 128-bit remainder/quotient
   pair; the high half lands in out2, the low half is fetched from the
   helper's retxl slot via return_low128(). */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

/* 64-bit unsigned divide takes a full 128-bit dividend (out:out2). */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2436
/* BFP divide, short (DEB) and long (DDB) formats. */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP divide, extended (128-bit) format: operands and result are
   register pairs, low half returned via return_low128(). */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2455
/* EXTRACT ACCESS REGISTER: load access register r2 into the output. */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

/* EXTRACT CPU ATTRIBUTE: we model no cache topology, so return all-ones. */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

/* EXTRACT FPC: read the floating-point control register. */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2475
/* EXTRACT PSW: store the high half of the PSW mask into r1 and, if r2
   is nonzero, the low half into r2. */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2. Thus we cannot defer these writes to an output hook. */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
2493
/* EXECUTE: run the single instruction at the target address, with its
   second byte OR'ed with bits from register r1 (r1 == 0 means no
   modification). The helper sets up env->ex_value; translation restarts
   from there, hence DISAS_PC_CC_UPDATED. */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed. */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    /* Commit PSW address and cc state before handing off to the helper. */
    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    /* Only free v1 when we allocated it ourselves above. */
    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
2525
/* LOAD FP INTEGER (round to integral value) for short, long and extended
   BFP formats. fpinst_extract_m34() validates the rounding-mode fields
   and returns NULL (after raising an exception) when they are invalid. */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* Extended format: 128-bit operand in in1:in2, low result via retxl. */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2562
/* FIND LEFTMOST ONE: R1 gets the count of leading zeros (64 for a zero
   input); R1+1 gets the input with the found bit cleared. */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64. It also lets cc_dst be a convenient
       temporary during our computation. */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
       value by 64, which is undefined. But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing. */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2582
2583 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2584 {
2585 int m3 = get_field(s, m3);
2586 int pos, len, base = s->insn->data;
2587 TCGv_i64 tmp = tcg_temp_new_i64();
2588 uint64_t ccm;
2589
2590 switch (m3) {
2591 case 0xf:
2592 /* Effectively a 32-bit load. */
2593 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2594 len = 32;
2595 goto one_insert;
2596
2597 case 0xc:
2598 case 0x6:
2599 case 0x3:
2600 /* Effectively a 16-bit load. */
2601 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2602 len = 16;
2603 goto one_insert;
2604
2605 case 0x8:
2606 case 0x4:
2607 case 0x2:
2608 case 0x1:
2609 /* Effectively an 8-bit load. */
2610 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2611 len = 8;
2612 goto one_insert;
2613
2614 one_insert:
2615 pos = base + ctz32(m3) * 8;
2616 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2617 ccm = ((1ull << len) - 1) << pos;
2618 break;
2619
2620 default:
2621 /* This is going to be a sequence of loads and inserts. */
2622 pos = base + 32 - 8;
2623 ccm = 0;
2624 while (m3) {
2625 if (m3 & 0x8) {
2626 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2627 tcg_gen_addi_i64(o->in2, o->in2, 1);
2628 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2629 ccm |= 0xff << pos;
2630 }
2631 m3 = (m3 << 1) & 0xf;
2632 pos -= 8;
2633 }
2634 break;
2635 }
2636
2637 tcg_gen_movi_i64(tmp, ccm);
2638 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2639 tcg_temp_free_i64(tmp);
2640 return DISAS_NEXT;
2641 }
2642
/* Insert immediate: deposit in2 into in1 at the bit position and width
   packed into insn->data (low byte = shift, high bits = size). */
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
2650
/* INSERT PROGRAM MASK: build cc (2 bits) + program mask (4 bits from
   PSW bits 40-43) and deposit the byte into bits 24-31 of r1. */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* Materialize the condition code before reading it. */
    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
2666
2667 #ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY: m4 (local-clearing control) is only
   honored when the local-TLB-clearing facility is installed. */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

/* INVALIDATE PAGE TABLE ENTRY: same m4 handling as IDTE above. */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

/* INSERT STORAGE KEY EXTENDED: fetch the storage key for address in2. */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2701 #endif
2702
2703 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2704 {
2705 int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2706 int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2707 int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2708 TCGv_i32 t_r1, t_r2, t_r3, type;
2709
2710 switch (s->insn->data) {
2711 case S390_FEAT_TYPE_KMCTR:
2712 if (r3 & 1 || !r3) {
2713 gen_program_exception(s, PGM_SPECIFICATION);
2714 return DISAS_NORETURN;
2715 }
2716 /* FALL THROUGH */
2717 case S390_FEAT_TYPE_PPNO:
2718 case S390_FEAT_TYPE_KMF:
2719 case S390_FEAT_TYPE_KMC:
2720 case S390_FEAT_TYPE_KMO:
2721 case S390_FEAT_TYPE_KM:
2722 if (r1 & 1 || !r1) {
2723 gen_program_exception(s, PGM_SPECIFICATION);
2724 return DISAS_NORETURN;
2725 }
2726 /* FALL THROUGH */
2727 case S390_FEAT_TYPE_KMAC:
2728 case S390_FEAT_TYPE_KIMD:
2729 case S390_FEAT_TYPE_KLMD:
2730 if (r2 & 1 || !r2) {
2731 gen_program_exception(s, PGM_SPECIFICATION);
2732 return DISAS_NORETURN;
2733 }
2734 /* FALL THROUGH */
2735 case S390_FEAT_TYPE_PCKMO:
2736 case S390_FEAT_TYPE_PCC:
2737 break;
2738 default:
2739 g_assert_not_reached();
2740 };
2741
2742 t_r1 = tcg_const_i32(r1);
2743 t_r2 = tcg_const_i32(r2);
2744 t_r3 = tcg_const_i32(r3);
2745 type = tcg_const_i32(s->insn->data);
2746 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2747 set_cc_static(s);
2748 tcg_temp_free_i32(t_r1);
2749 tcg_temp_free_i32(t_r2);
2750 tcg_temp_free_i32(t_r3);
2751 tcg_temp_free_i32(type);
2752 return DISAS_NEXT;
2753 }
2754
/* BFP COMPARE AND SIGNAL for short (KEB), long (KDB) and extended (KXB)
   formats; the helper sets the condition code. */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Extended format compares two 128-bit register pairs. */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2775
/* LOAD AND ADD: atomically add in1 to memory; the instruction's result
   is the *original* memory value (fetched into in2 here), but the CC is
   based on the sum, so recompute it non-atomically below. */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC. */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND AND: as op_laa, but with a bitwise AND. */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND OR: as op_laa, but with a bitwise OR. */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND EXCLUSIVE OR: as op_laa, but with a bitwise XOR. */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory. */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2819
/* BFP format conversions (lengthen/round) via helpers. */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* Round long to short; m34 carries the validated rounding controls. */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* Round extended to long. */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* Round extended to short. */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* Lengthen long to extended; 128-bit result via retxl. */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* Lengthen short to extended; 128-bit result via retxl. */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* LOAD (short to long register image): place the 32-bit value in the
   high half of the 64-bit FP register image. */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2881
/* LOAD LOGICAL THIRTY ONE BITS: mask to the low 31 bits. */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}

/* Memory loads of various widths, sign- and zero-extending; the address
   is in in2, the result lands in out. */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2929
/* LOAD AND TRAP family: perform the load/move, then trap (data
   exception) when the loaded value is zero. */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* 64-bit LOAD AND TRAP. */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD HIGH AND TRAP: stores into the high 32 bits of r1. */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL AND TRAP (32 -> 64, zero-extending load). */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL THIRTY ONE BITS AND TRAP. */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2984
/* LOAD ON CONDITION: out = condition(m3) ? in2 : in1, emitted as a
   movcond. The 32-bit comparison path first materializes the condition
   as a 0/1 value so a single 64-bit movcond can be used. */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        /* Widen the 0/1 condition to 64 bits for the movcond. */
        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
3014
3015 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 from memory.
   Control register changes can unmask interrupts, so force an exit to
   the main loop rather than chaining TBs. */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return DISAS_PC_STALE_NOCHAIN;
}

/* LOAD CONTROL (64-bit variant, LCTLG). */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return DISAS_PC_STALE_NOCHAIN;
}

/* LOAD REAL ADDRESS: translate the address in in2; helper sets CC. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3044
/* LOAD PROGRAM PARAMETER: store in2 into env->pp. */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
3050
/* LOAD PSW (short, 8-byte format): read the 32-bit mask and 32-bit
   address from memory and install them as the new PSW. */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* PSW replacement is a PER breaking event. */
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The operand must be doubleword-aligned; enforce on the first load. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3070
/* LOAD PSW EXTENDED (16-byte format): 64-bit mask + 64-bit address. */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* PSW replacement is a PER breaking event. */
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The operand must be doubleword-aligned; enforce on the first load. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3088 #endif
3089
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory. */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
3099
/* LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping mod 16) from
   consecutive words at in2. The first and last words are loaded first so
   that any access exception occurs before any register is modified. */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3145
/* LOAD MULTIPLE HIGH: like op_lm32 but writes the high 32 bits of each
   target register. Same fault-first ordering strategy. */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3191
/* LOAD MULTIPLE (64-bit, LMG): load registers r1..r3 from consecutive
   doublewords at in2, faulting before any register write. */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3232
/* LOAD PAIR DISJOINT: load two operands from unrelated addresses as if
   interlocked. In a parallel context this is done by stopping the world
   (EXCP_ATOMIC); serially, two plain aligned loads suffice. */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3258
/* LOAD PAIR FROM QUADWORD: atomic 16-byte load. Falls back to
   exit_atomic when the host lacks 128-bit atomics in parallel mode. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}
3272
3273 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS: load via the real-address MMU index,
   bypassing DAT; operand width comes from insn->data. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, r2), 0);
    tcg_gen_qemu_ld_tl(o->out, o->addr1, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3280 #endif
3281
/* LOAD AND ZERO RIGHTMOST BYTE: clear the low 8 bits of in2. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3287
/* LOAD COUNT TO BLOCK BOUNDARY: out = min(16, bytes remaining until the
   next 2^(m3+6)-byte boundary after addr1). m3 > 6 is a specification
   exception. */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -(addr | -block_size) == block_size - (addr mod block_size). */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3304
/* Generic register move: steal in2 (and its global-ness flag) as the
   output instead of copying, avoiding a redundant mov. */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}
3313
/* Move with access-register-1 side effect (MVCDK/MVCSK style setup):
   like op_mov2, but also set access register 1 according to the current
   address-space control in the PSW. */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In access-register-secondary mode, copy the AR of base reg b2
           (AR 0 is treated as zero). */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}
3348
/* Pair move: steal both input halves as the output pair (no copies). */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
3360
/* MOVE (MVC): copy l1+1 bytes from in2 to addr1 via helper. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* MOVE INVERSE (MVCIN): as MVC, but bytes copied in reverse order. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3376
/* MOVE LONG (MVCL): r1 and r2 name even-odd pairs (address + length);
   odd register numbers are a specification exception. Helper sets CC. */
static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3397
/* MOVE LONG EXTENDED (MVCLE): r1/r3 are even-odd pairs; in2 carries the
   padding byte operand. Helper sets CC. */
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3418
/* MOVE LONG UNICODE (MVCLU): same register-pair constraints as MVCLE. */
static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3439
/* MOVE WITH OPTIONAL SPECIFICATIONS: r3 holds the operand-access
   controls; helper performs the move and sets CC. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3447
3448 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY (MVCP): the l1 field names the register holding the
   true length; helper performs the cross-space move and sets CC. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE TO SECONDARY (MVCS): mirror of MVCP. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3464 #endif
3465
/* MVN: move numerics, via helper, with the l1 length field. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3473
/* MVO: move with offset, via helper, with the l1 length field. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3481
/* MVPG: move page, via helper.  GR0 carries additional controls
   (passed through as regs[0]); CC comes from the helper. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3488
3489 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3490 {
3491 TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3492 TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3493
3494 gen_helper_mvst(cc_op, cpu_env, t1, t2);
3495 tcg_temp_free_i32(t1);
3496 tcg_temp_free_i32(t2);
3497 set_cc_static(s);
3498 return DISAS_NEXT;
3499 }
3500
/* MVZ: move zones, via helper, with the l1 length field. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3508
/* Integer multiply: out = in1 * in2 (low 64 bits). */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3514
/* 64x64->128 unsigned multiply: high half into out, low half into out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3520
/* MEEB: short BFP multiply, via helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3526
/* MDEB: multiply short BFP operands to a long result, via helper. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3532
/* MDB: long BFP multiply, via helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3538
/* MXB: extended (128-bit) BFP multiply, via helper; the low half of the
   128-bit result is returned through return_low128. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3545
/* MXDB: multiply long BFP operands to an extended result, via helper. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3552
/* MAEB: short BFP multiply-and-add, via helper; r3 is the addend. */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3560
/* MADB: long BFP multiply-and-add, via helper; r3 is the addend. */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3568
/* MSEB: short BFP multiply-and-subtract, via helper; r3 is the subtrahend
   source operand. */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3576
/* MSDB: long BFP multiply-and-subtract, via helper. */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3584
3585 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3586 {
3587 TCGv_i64 z, n;
3588 z = tcg_const_i64(0);
3589 n = tcg_temp_new_i64();
3590 tcg_gen_neg_i64(n, o->in2);
3591 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3592 tcg_temp_free_i64(n);
3593 tcg_temp_free_i64(z);
3594 return DISAS_NEXT;
3595 }
3596
/* Load negative, short float: force the sign bit (bit 31) on. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3602
/* Load negative, long float: force the sign bit (bit 63) on. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3608
/* Load negative, extended float: force the sign bit in the high half,
   pass the low half through unchanged. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3615
/* NC: AND of storage operands, via helper; CC comes from the helper. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3624
/* Load complement: out = -in2. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3630
/* Load complement, short float: flip the sign bit (bit 31). */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3636
/* Load complement, long float: flip the sign bit (bit 63). */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3642
/* Load complement, extended float: flip the sign bit in the high half,
   pass the low half through unchanged. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3649
/* OC: OR of storage operands, via helper; CC comes from the helper. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3658
/* Bitwise OR: out = in1 | in2. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3664
/*
 * OR immediate into a sub-field of the register.  insn->data encodes
 * the field: low byte is the shift, next byte the width in bits.
 * The immediate (in2) is shifted into position first, and the CC is
 * derived only from the bits that were manipulated.
 */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 must be a local temp; we modify it in place. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3680
/*
 * OR immediate to storage.  Without INTERLOCKED_ACCESS_2 this is a
 * plain load/OR/store; with it, the OR is done atomically in memory
 * and only the CC computation is redone here.  insn->data carries the
 * MemOp for the access.
 */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3701
/* PACK: via helper, with the l1 length field. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3709
3710 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3711 {
3712 int l2 = get_field(s, l2) + 1;
3713 TCGv_i32 l;
3714
3715 /* The length must not exceed 32 bytes. */
3716 if (l2 > 32) {
3717 gen_program_exception(s, PGM_SPECIFICATION);
3718 return DISAS_NORETURN;
3719 }
3720 l = tcg_const_i32(l2);
3721 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3722 tcg_temp_free_i32(l);
3723 return DISAS_NEXT;
3724 }
3725
3726 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3727 {
3728 int l2 = get_field(s, l2) + 1;
3729 TCGv_i32 l;
3730
3731 /* The length must be even and should not exceed 64 bytes. */
3732 if ((l2 & 1) || (l2 > 64)) {
3733 gen_program_exception(s, PGM_SPECIFICATION);
3734 return DISAS_NORETURN;
3735 }
3736 l = tcg_const_i32(l2);
3737 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3738 tcg_temp_free_i32(l);
3739 return DISAS_NEXT;
3740 }
3741
/* POPCNT: population count, via helper. */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}
3747
3748 #ifndef CONFIG_USER_ONLY
/* PTLB: purge TLB, via helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
3754 #endif
3755
/*
 * RISBG/RISBGN/RISBHG/RISBLG: rotate then insert selected bits.
 * First compute the insert mask (MASK, from R2) and keep mask (IMASK,
 * from R1) out of the i3/i4 bit positions, then try the cheap
 * extract/deposit code paths before falling back to the general
 * rotate-and-mask sequence.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    /* High bit of i4: zero the unselected bits instead of keeping R1's. */
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  pmask restricts the
       operation to the whole register, its high half, or its low half. */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields.op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        /* pos < 0 signals "no deposit form possible" below. */
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3845
/*
 * RNSBG/ROSBG/RXSBG: rotate then AND/OR/XOR selected bits.
 * op2 selects the boolean operation (0x55 AND, 0x56 OR, 0x57 XOR).
 * The high bit of i3 makes this a test-only form whose result is
 * discarded; the CC is always set from the selected bits.
 */
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields.op2) {
    case 0x55: /* AND */
        /* Set the non-selected bits so AND leaves them unchanged. */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3898
/* Byte-swap the low 16 bits of in2 into out. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3904
/* Byte-swap the low 32 bits of in2 into out. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3910
/* Byte-swap all 64 bits of in2 into out. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3916
3917 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3918 {
3919 TCGv_i32 t1 = tcg_temp_new_i32();
3920 TCGv_i32 t2 = tcg_temp_new_i32();
3921 TCGv_i32 to = tcg_temp_new_i32();
3922 tcg_gen_extrl_i64_i32(t1, o->in1);
3923 tcg_gen_extrl_i64_i32(t2, o->in2);
3924 tcg_gen_rotl_i32(to, t1, t2);
3925 tcg_gen_extu_i32_i64(o->out, to);
3926 tcg_temp_free_i32(t1);
3927 tcg_temp_free_i32(t2);
3928 tcg_temp_free_i32(to);
3929 return DISAS_NEXT;
3930 }
3931
/* RLLG: 64-bit rotate left. */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3937
3938 #ifndef CONFIG_USER_ONLY
/* RRBE: reset reference bit extended, via helper; CC from the helper. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3945
/* SACF: set address space control fast, via helper. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return DISAS_PC_STALE;
}
3952 #endif
3953
/*
 * SAM24/SAM31/SAM64: set addressing mode.  insn->data selects the mode
 * (0 -> 24-bit, 1 -> 31-bit, otherwise 64-bit) and determines the
 * address mask used for the specification check.  The PSW AM bits
 * (positions 31-32 of psw_mask) are updated via deposit.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
3988
/* SAR: set access register r1 from the low 32 bits of in2. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3995
/* SEB: short BFP subtract, via helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4001
/* SDB: long BFP subtract, via helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4007
/* SXB: extended BFP subtract, via helper; low half via return_low128. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
4014
/* SQEB: short BFP square root, via helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
4020
/* SQDB: long BFP square root, via helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
4026
/* SQXB: extended BFP square root, via helper; low half via return_low128. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
4033
4034 #ifndef CONFIG_USER_ONLY
/* SERVC: service call (SCLP), via helper; CC from the helper. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4041
4042 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4043 {
4044 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4045 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4046 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4047 set_cc_static(s);
4048 tcg_temp_free_i32(r1);
4049 tcg_temp_free_i32(r3);
4050 return DISAS_NEXT;
4051 }
4052 #endif
4053
/*
 * STOC/STOCG/STOCFH: store on condition.  insn->data selects the
 * variant (0 = STOC, 1 = STOCG, 2 = STOCFH).  The condition is
 * inverted and used to branch around the store.
 */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high half of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
4098
/*
 * SLA/SLAG: shift left single (arithmetic).  insn->data is the sign-bit
 * position (31 or 63), which also selects the CC computation.
 */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
4112
/* Logical shift left: out = in1 << in2. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4118
/* Arithmetic shift right: out = in1 >> in2 (sign-propagating). */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4124
/* Logical shift right: out = in1 >> in2 (zero-filling). */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4130
/* SFPC: set FP control register, via helper. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}
4136
/* SFASR: set FPC and signal, via helper. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
4142
/* SRNM: set BFP rounding mode from the 2 low bits of the address. */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4150
/* SRNMB: set BFP rounding mode from the low byte of the address. */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4158
/*
 * SRNMT: set DFP rounding mode.  Implemented inline by depositing the
 * 3-bit mode into the FPC (bits 4-6); no helper is needed since DFP
 * itself is not implemented.
 */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4174
/*
 * SPM: set program mask.  The CC is taken from bits 28-29 of in1 and
 * the program mask (4 bits, from bit 24) is deposited into the PSW.
 */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    /* CC = bits 28-29 of the first operand. */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4185
/*
 * ECTG: extract CPU time.  Loads the third operand into r3, stores
 * (first operand - CPU timer) into GR0, and the second operand address
 * into GR1.  All operands are fetched before anything is modified.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = get_address(s, 0, r3, 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4215
4216 #ifndef CONFIG_USER_ONLY
/* SPKA: set PSW key from bits of the second-operand address. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}
4223
/* SSKE: set storage key extended, via helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4229
/* SSM: set system mask (top byte of the PSW) from in2. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
4236
/* STAP: store CPU address (core_id) into out. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4242 #endif
4243
/* STCK: store clock, via helper; CC forced to 0. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4251
/*
 * STCKE: store clock extended.  Builds the 16-byte extended TOD value
 * from the helper result plus the TOD programmable register, then
 * stores the two 8-byte halves.  CC is forced to 0.
 */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16-bit value stored in a uint32_t (only valid bits set). */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4278
4279 #ifndef CONFIG_USER_ONLY
/* SCK: set clock.  The 8-byte operand must be aligned; CC from helper. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4287
/* SCKC: set clock comparator, via helper. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}
4293
/* SCKPF: set clock programmable field from GR0, via helper. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}
4299
/* STCKC: store clock comparator, via helper. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}
4305
/* STCTG: store control registers r1..r3 (64-bit), via helper. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4315
/* STCTL: store control registers r1..r3 (32-bit), via helper. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4325
/* STIDP: store CPU ID into out. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}
4331
/* SPT: set CPU timer, via helper. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}
4337
/* STFL: store facility list, via helper. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}
4343
/* STPT: store CPU timer, via helper. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}
4349
/* STSI: store system information, via helper (GR0/GR1 carry the
   function code and selectors); CC from the helper. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4356
/* SPX: set prefix register, via helper. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
4362
/* XSCH: cancel subchannel (designated by GR1), via helper. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4369
/* CSCH: clear subchannel (designated by GR1), via helper. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4376
/* HSCH: halt subchannel (designated by GR1), via helper. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4383
/* MSCH: modify subchannel (GR1 designates, in2 is the SCHIB address). */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4390
/* RCHP: reset channel path (designated by GR1), via helper. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4397
/* RSCH: resume subchannel (designated by GR1), via helper. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4404
/* SAL: set address limit (from GR1), via helper. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}
4410
/* SCHM: set channel monitor (GR1/GR2 carry the controls), via helper. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4416
/* SIGA: not implemented; always report subchannel not operational. */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}
4423
/* STCPS: not implemented; a no-op here. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided.  */
    return DISAS_NEXT;
}
4429
/* SSCH: start subchannel (GR1 designates, in2 is the ORB address). */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4436
/* STSCH: store subchannel (GR1 designates, in2 is the SCHIB address). */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4443
/* STCRW: store channel report word, via helper; CC from the helper. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4450
/* TPI: test pending interruption, via helper; CC from the helper. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4457
/* TSCH: test subchannel (GR1 designates, in2 is the IRB address). */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4464
/* CHSC: channel subsystem call, via helper; CC from the helper. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4471
/* STPX: store prefix; the prefix value is masked to its valid bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4478
/*
 * STNSM/STOSM: store then AND/OR system mask.  Opcode 0xac is STNSM
 * (AND), otherwise STOSM (OR).  The current mask byte is stored before
 * the PSW is modified so a restart after a fault sees the old mask.
 */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
4502
/*
 * STURA/STURG: store using real address.  insn->data carries the MemOp
 * size; a PER storage-alteration event is raised when PER is active.
 */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, r2), 0);
    tcg_gen_qemu_st_tl(o->in1, o->addr1, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
4514 #endif
4515
/* STFLE: store facility list extended, via helper; CC from the helper. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4522
/* Store the low byte of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4528
/* Store the low 16 bits of in1 at address in2. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4534
/* Store the low 32 bits of in1 at address in2. */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4540
/* Store all 64 bits of in1 at address in2. */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4546
4547 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4548 {
4549 TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4550 TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4551 gen_helper_stam(cpu_env, r1, o->in2, r3);
4552 tcg_temp_free_i32(r1);
4553 tcg_temp_free_i32(r3);
4554 return DISAS_NEXT;
4555 }
4556
/*
 * STCM/STCMY/STCMH: store characters under mask.  insn->data is the
 * base bit position of the register field being stored (distinguishes
 * the low-word and high-word variants).  Contiguous masks become one
 * 32/16/8-bit store; other masks become a byte-by-byte sequence.
 */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the least-significant selected byte. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4605
4606 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4607 {
4608 int r1 = get_field(s, r1);
4609 int r3 = get_field(s, r3);
4610 int size = s->insn->data;
4611 TCGv_i64 tsize = tcg_const_i64(size);
4612
4613 while (1) {
4614 if (size == 8) {
4615 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4616 } else {
4617 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4618 }
4619 if (r1 == r3) {
4620 break;
4621 }
4622 tcg_gen_add_i64(o->in2, o->in2, tsize);
4623 r1 = (r1 + 1) & 15;
4624 }
4625
4626 tcg_temp_free_i64(tsize);
4627 return DISAS_NEXT;
4628 }
4629
4630 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4631 {
4632 int r1 = get_field(s, r1);
4633 int r3 = get_field(s, r3);
4634 TCGv_i64 t = tcg_temp_new_i64();
4635 TCGv_i64 t4 = tcg_const_i64(4);
4636 TCGv_i64 t32 = tcg_const_i64(32);
4637
4638 while (1) {
4639 tcg_gen_shl_i64(t, regs[r1], t32);
4640 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4641 if (r1 == r3) {
4642 break;
4643 }
4644 tcg_gen_add_i64(o->in2, o->in2, t4);
4645 r1 = (r1 + 1) & 15;
4646 }
4647
4648 tcg_temp_free_i64(t);
4649 tcg_temp_free_i64(t4);
4650 tcg_temp_free_i64(t32);
4651 return DISAS_NEXT;
4652 }
4653
/*
 * STPQ - STore Pair to Quadword.  Serial (non-parallel) translation
 * uses the plain helper; under CF_PARALLEL a 16-byte atomic store is
 * required, so fall back to the parallel helper only when the host
 * supports 128-bit atomics, otherwise exit to the serial loop.
 */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4666
/* SRST - SEARCH STRING: entirely in the helper; CC comes back via cc_op. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SRSTU - SEARCH STRING UNICODE: as op_srst, but for 2-byte characters. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4692
/* Generic subtract: out = in1 - in2.  CC handling is done by cout_*. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4698
/*
 * SLB/SLBG - SUBTRACT LOGICAL WITH BORROW:
 * out = in1 - in2 - borrow, where the borrow is derived from the
 * current condition code before this instruction.
 */
static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC. Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    /* Materialize the comparison result as a 0/1 value in `borrow`. */
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return DISAS_NEXT;
}
4724
/*
 * SVC - SUPERVISOR CALL: record the call code and instruction length
 * in the CPU state, then raise the SVC exception.  The PSW address and
 * cc_op are flushed first because the exception leaves the TB.
 */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    /* Only the low byte of the immediate is the SVC number. */
    t = tcg_const_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4743
4744 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4745 {
4746 int cc = 0;
4747
4748 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4749 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4750 gen_op_movi_cc(s, cc);
4751 return DISAS_NEXT;
4752 }
4753
/* TCEB - TEST DATA CLASS (short BFP); CC from the helper. */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TCDB - TEST DATA CLASS (long BFP); CC from the helper. */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TCXB - TEST DATA CLASS (extended BFP); the 128-bit value is in out/out2. */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4774
4775 #ifndef CONFIG_USER_ONLY
4776
/* TB - TEST BLOCK (privileged); entirely in the helper, CC via cc_op. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TPROT - TEST PROTECTION (privileged); entirely in the helper. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4790
4791 #endif
4792
/*
 * TP - TEST DECIMAL: check the packed-decimal field at o->addr1.
 * The l1 field encodes length - 1, hence the + 1.
 */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4801
/* TR - TRANSLATE: table at o->in2 applied to the bytes at o->addr1. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRE - TRANSLATE EXTENDED; helper returns a 128-bit result pair. */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRT - TRANSLATE AND TEST. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRTR - TRANSLATE AND TEST REVERSE. */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4836
/*
 * TRTT/TRTO/TROT/TROO - translate one-/two-byte characters.  The low
 * two opcode bits (passed to the helper as `sizes`) select the source
 * and destination character widths.
 */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    /* The m3 modifier is only honoured with the ETF2-enhancement
       facility; without it, behave as if m3 were zero. */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* NOTE(review): -1 presumably tells the helper to skip the
           test-character comparison — confirm against helper_trXX. */
        tcg_gen_movi_i32(tst, -1);
    } else {
        /* Test character comes from r0, truncated to its width. */
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}
4867
/*
 * TS - TEST AND SET: atomically exchange the byte at o->in2 with 0xff;
 * CC is the leftmost bit (bit 7 from the LSB) of the old byte value.
 */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4877
/* UNPK - UNPACK: packed decimal at o->in2 to zoned at o->addr1. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/*
 * UNPKA - UNPACK ASCII.  l1 encodes length - 1; a specification
 * exception is raised at translation time for out-of-range lengths.
 */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes. */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPKU - UNPACK UNICODE; same pattern with an even-length constraint. */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes. */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4919
4920
/*
 * XC - eXclusive-or Characters: xor l1+1 bytes of operand 2 into
 * operand 1.  The common "XC x,x" idiom (clear memory) is detected at
 * translation time and inlined as zero stores with CC fixed to 0; the
 * general case defers to the helper, which also computes CC.
 */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);    /* encoded length, i.e. bytes - 1 */
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        /* Emit the widest stores the remaining length allows. */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* Result is all zeros, so CC is 0 by definition. */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
4973
/* Generic xor: out = in1 ^ in2.  CC handling is done by cout_*. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/*
 * Immediate xor into a sub-field of a register (XIHF/XILF style).
 * s->insn->data packs the field width (high byte, in bits) and the
 * shift of the field within the register (low byte).
 */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must not be a TCG global. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4995
/*
 * XI et al - xor o->in2 into memory at o->addr1.  With the
 * interlocked-access facility 2 the xor is performed atomically in
 * memory (fetching the old value); otherwise it is a plain
 * load / xor / store sequence.  In both cases o->out receives the
 * new value so the caller can derive CC.  s->insn->data is the MemOp.
 */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
5016
/* Produce a constant zero result. */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

/* Produce a constant zero result pair (out and out2). */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    /* out2 aliases out; flag it so cleanup does not free it twice. */
    o->g_out2 = true;
    return DISAS_NEXT;
}
5030
5031 #ifndef CONFIG_USER_ONLY
/* CLP - zPCI CaLl logical Processor; entirely in the helper. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCILG - zPCI Load (from PCI function); entirely in the helper. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCISTG - zPCI Store (to PCI function); entirely in the helper. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STPCIFC - zPCI STore PCI Function Controls; b2 is the access reg. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SIC - Set Interruption Controls; does not set CC. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* RPCIT - Refresh PCI Translations; entirely in the helper. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCISTB - zPCI Store Block; b2 is the access reg. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MPCIFC - Modify PCI Function Controls; b2 is the access reg. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
5121 #endif
5122
5123 #include "translate_vx.inc.c"
5124
5125 /* ====================================================================== */
5126 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5127 the original inputs), update the various cc data structures in order to
5128 be able to compute the new condition code. */
5129
/* Each cout_* helper records the CC_OP and the value(s) it depends on;
   the actual condition code is computed lazily from these. */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* CC_OP_NZ looks at all 64 bits; only the low 32 are relevant here,
       so zero-extend into cc_dst first. */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
5285
5286 /* ====================================================================== */
5287 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5288 with the TCG register to which we will write. Used in combination with
5289 the "wout" generators, in some cases we need a new temporary, and in
5290 some cases we can write to a TCG global. */
5291
/* Allocate a fresh temporary for the output. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh temporary pair for a double-width output. */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into register r1; g_out marks it as a TCG global
   that must not be freed by the generic cleanup. */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1, r1+1. */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out = load_freg(get_field(s, r1));
    o->out2 = load_freg(get_field(s, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
5328
5329 /* ====================================================================== */
5330 /* The "Write OUTput" generators. These generally perform some non-trivial
5331 copy of data to TCG globals, or to main memory. The trivial cases are
5332 generally handled by having a "prep" generator install the TCG global
5333 as the destination of the operation. */
5334
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert the low 8 bits of the result into r1, preserving the rest. */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert the low 16 bits of the result into r1, preserving the rest. */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Write the result to the high 32 bits of r1. */
static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Write the 32-bit pair out/out2 to registers r1 and r1+1. */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split the 64-bit result across the even/odd pair: low half to r1+1,
   high half to r1.  Note this clobbers o->out in the process. */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Short (32-bit) FP result to register f(r1). */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

/* Long (64-bit) FP result to register f(r1). */
static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Extended (128-bit) FP result to the register pair f(r1), f(r1+2). */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Write only when r1 and r2 name different registers. */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* FP variant of the conditional write above. */
static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant (MO_ALIGN) of the 16-bit store. */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant (MO_ALIGN) of the 32-bit store. */
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant (MO_ALIGN) of the 64-bit store. */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* 32-bit store to the address computed for operand 2 (o->in2). */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Copy input 2 (rather than the output) back to register r1. */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5501
5502 /* ====================================================================== */
5503 /* The "INput 1" generators. These load the first operand to an insn. */
5504
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* Use register r1 in place ("_o"): g_in1 marks it as a TCG global
   that must not be modified or freed. */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down ("_sr32"). */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the even/odd pair starting at r1. */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value assembled from the pair: r1 supplies the high half,
   r1+1 the low half. */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* Short (32-bit) FP operand from f(r1). */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

/* Long (64-bit) FP operand from f(r1). */
static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Compute the effective address of operand 1 only (no load). */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* As in1_la1, but from the (x2, b2, d2) address fields. */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* The in1_m1_* helpers compute the operand-1 address and load from it. */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5700
5701 /* ====================================================================== */
5702 /* The "INput 2" generators. These load the second operand to an insn. */
5703
/* in2 = GR r1 itself (no copy); g_in2 marks it as a global TCG value
   that translate_one must not free. */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* in2 = low 16 bits of GR r1, zero-extended. */
static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

/* in2 = low 32 bits of GR r1, zero-extended. */
static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* in2 = the 64-bit value held in the even/odd pair r1:r1+1
   (r1 supplies the high half); r1 must be even per the SPEC below. */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

/* in2 = a fresh copy of GR r2. */
static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

/* in2 = GR r2 itself (no copy); see g_in2 note in in2_r1_o. */
static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* in2 = copy of GR r2, but left NULL when r2 is 0 (for insns where
   register 0 means "no operand" rather than GR0). */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0
5754
/* in2 = low 8 bits of GR r2, sign-extended. */
static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

/* in2 = low 8 bits of GR r2, zero-extended. */
static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

/* in2 = low 16 bits of GR r2, sign-extended. */
static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

/* in2 = low 16 bits of GR r2, zero-extended. */
static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0
5782
/* in2 = a fresh copy of GR r3. */
static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

/* in2 = high 32 bits of GR r3, shifted down into the low half. */
static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

/* in2 = low 32 bits of GR r3, zero-extended. */
static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0
5802
/* in2 = low 32 bits of GR r2, sign-extended. */
static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

/* in2 = low 32 bits of GR r2, zero-extended. */
static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

/* in2 = high 32 bits of GR r2, shifted down into the low half. */
static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
5823
/* in2 = short (32-bit) FP register f2, widened into an i64 container. */
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

/* in2 = long (64-bit) FP register f2. */
static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number;
   the pair partner lives two registers above r2 (SPEC enforces a
   valid pair leader). */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128
5842
/* in2 = register-indirect address: GR r2 treated as a base with no
   index and no displacement. */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(s, r2), 0);
}
#define SPEC_in2_ra2 0

/* in2 = effective address from the X2/B2/D2 fields; x2 is optional
   in some formats and defaults to no index. */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/* in2 = PC-relative address: current PC plus the halfword-scaled
   signed immediate i2. */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
#define SPEC_in2_ri2 0

/* in2 = shift amount for a 32-bit shift (masked to 0..31). */
static void in2_sh32(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 31);
}
#define SPEC_in2_sh32 0

/* in2 = shift amount for a 64-bit shift (masked to 0..63). */
static void in2_sh64(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 63);
}
#define SPEC_in2_sh64 0
5873
/* The in2_m2_* generators compute the operand-2 effective address via
   in2_a2 and then overwrite o->in2 in place with the loaded value. */

/* in2 = 8-bit zero-extended load from the operand-2 address. */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

/* in2 = 16-bit sign-extended load from the operand-2 address. */
static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

/* in2 = 16-bit zero-extended load from the operand-2 address. */
static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

/* in2 = 32-bit sign-extended load from the operand-2 address. */
static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

/* in2 = 32-bit zero-extended load from the operand-2 address. */
static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_32u, but with an alignment check (MO_ALIGN). */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

/* in2 = 64-bit load from the operand-2 address. */
static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_64, but with an alignment check (MO_ALIGN). */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif
5933
/* The in2_mri2_* generators load from a PC-relative address (in2_ri2),
   overwriting o->in2 in place with the loaded value. */

/* in2 = 16-bit zero-extended load from the PC-relative address. */
static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

/* in2 = 32-bit sign-extended load from the PC-relative address. */
static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

/* in2 = 32-bit zero-extended load from the PC-relative address. */
static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

/* in2 = 64-bit load from the PC-relative address. */
static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
5961
/* in2 = the immediate i2, sign behavior as decoded by extract_field. */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

/* in2 = low 8 bits of the immediate i2, zero-extended. */
static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

/* in2 = low 16 bits of the immediate i2, zero-extended. */
static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

/* in2 = low 32 bits of the immediate i2, zero-extended. */
static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* in2 = 16-bit immediate shifted left by the per-insn data amount
   (s->insn->data), e.g. for the insert-immediate family. */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* in2 = 32-bit immediate shifted left by the per-insn data amount. */
static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* in2 = the raw (left-aligned) instruction image itself. */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
6007
6008 /* ====================================================================== */
6009
/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

/* First expansion of insn-data.def: one enum value per instruction. */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: one DisasInsn descriptor per instruction. */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities. */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: one switch case per opcode, mapping to the table
   entry defined above. */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
6123
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A size of zero marks an unused field descriptor.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        /* Sign-extend via the xor/subtract trick.  */
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6182
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        /* Left-align the insn image within the uint64_t, as expected
           by extract_field.  */
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6286
/*
 * Return true when REG names an "additional" floating point register,
 * i.e. any register other than FPRs 0, 2, 4 and 6 (odd-numbered, or
 * numbered above 6).
 */
static bool is_afp_reg(int reg)
{
    return (reg & 1) != 0 || reg > 6;
}
6291
/*
 * Return true when REG may lead a 128-bit FP register pair.  The valid
 * leaders are 0,1,4,5,8,9,12,13 -- exactly the registers whose bit 1
 * (value 2) is clear.
 */
static bool is_fp_pair(int reg)
{
    return (reg & 2) == 0;
}
6297
/*
 * Decode and translate a single instruction at s->base.pc_next.
 * Returns DISAS_NORETURN when an exception has been raised (illegal
 * opcode, privileged operation, data or specification exception);
 * otherwise the status produced by the insn's op helper.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};

    /* Search for the insn in the table. */
    insn = extract_insn(env, s);

    /* Emit insn_start now that we know the ILEN. */
    tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER enabled, report the instruction fetch to the helper. */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                return DISAS_NORETURN;
            }
        }
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }

    /* Implement the instruction.  Each phase helper is optional; the
       table entry supplies only the ones the insn needs. */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    /* Skip the output phases if the operation raised an exception. */
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       values that alias global TCG registers and must not be freed. */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6438
/* TranslatorOps hook: initialize per-TB disassembly state. */
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode: instruction addresses are truncated to 31 bits. */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    /* cs_base carries the value saved by EXECUTE, if any (see extract_insn). */
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}
6453
/* TranslatorOps hook: nothing to emit at TB start for s390x. */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

/* TranslatorOps hook: empty because insn_start is emitted from
   translate_one, once the instruction length is known. */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
}
6461
/* TranslatorOps hook: a guest breakpoint was hit at the current PC.
   Mark the TB to end with a debug exception; returns true to stop
   translation here. */
static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /*
     * Emit an insn_start to accompany the breakpoint exception.
     * The ILEN value is a dummy, since this does not result in
     * an s390x exception, but an internal qemu exception which
     * brings us back to interact with the gdbstub.
     */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size does the right thing. */
    dc->base.pc_next += 2;
    return true;
}
6484
/* TranslatorOps hook: translate one instruction and decide whether the
   TB must end (page crossing, or an EXECUTE target). */
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        /* Stop at a page boundary, or immediately when translating an
           EXECUTE target (ex_value set). */
        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6500
/* TranslatorOps hook: emit the TB epilogue according to how the last
   instruction left the PC and condition-code state. */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        /* TB already ended (goto_tb or exception); nothing to emit. */
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6534
6535 static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6536 {
6537 DisasContext *dc = container_of(dcbase, DisasContext, base);
6538
6539 if (unlikely(dc->ex_value)) {
6540 /* ??? Unfortunately log_target_disas can't use host memory. */
6541 qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
6542 } else {
6543 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6544 log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6545 }
6546 }
6547
/* Hook table consumed by the generic translator_loop. */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6557
/* Target entry point for code generation: drive the generic translator
   loop with the s390x hooks. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}
6564
/* Restore CPU state from the values recorded by tcg_gen_insn_start in
   translate_one: data[0] = PSW address, data[1] = cc op, data[2] = ILEN. */
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date. */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN. */
    env->int_pgm_ilen = data[2];
}