1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg-op.h"
37 #include "tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44
45 #include "trace-tcg.h"
46 #include "exec/translator.h"
47 #include "exec/log.h"
48 #include "qemu/atomic128.h"
49
50
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext;
53 typedef struct DisasInsn DisasInsn;
54 typedef struct DisasFields DisasFields;
55
56 struct DisasContext {
57 DisasContextBase base;
58 const DisasInsn *insn;
59 DisasFields *fields;
60 uint64_t ex_value;
61 /*
62 * During translate_one(), pc_tmp is used to determine the instruction
63 * to be executed after base.pc_next - e.g. next sequential instruction
64 * or a branch target.
65 */
66 uint64_t pc_tmp;
67 uint32_t ilen;
68 enum cc_op cc_op;
69 bool do_debug;
70 };
71
72 /* Information carried about a condition to be evaluated. */
73 typedef struct {
74 TCGCond cond:8;
75 bool is_64;
76 bool g1;
77 bool g2;
78 union {
79 struct { TCGv_i64 a, b; } s64;
80 struct { TCGv_i32 a, b; } s32;
81 } u;
82 } DisasCompare;
83
84 #ifdef DEBUG_INLINE_BRANCHES
85 static uint64_t inline_branch_hit[CC_OP_MAX];
86 static uint64_t inline_branch_miss[CC_OP_MAX];
87 #endif
88
89 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
90 {
91 TCGv_i64 tmp;
92
93 if (s->base.tb->flags & FLAG_MASK_32) {
94 if (s->base.tb->flags & FLAG_MASK_64) {
95 tcg_gen_movi_i64(out, pc);
96 return;
97 }
98 pc |= 0x80000000;
99 }
100 assert(!(s->base.tb->flags & FLAG_MASK_64));
101 tmp = tcg_const_i64(pc);
102 tcg_gen_deposit_i64(out, out, tmp, 0, 32);
103 tcg_temp_free_i64(tmp);
104 }
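/*
 * Illustrative summary of the three cases above (a sketch, not extra
 * translator code): in 64-bit mode the link information is the full
 * 64-bit address; in 31-bit mode bit 2^31 (the addressing-mode bit) is
 * set before the low 32 bits are deposited; in 24-bit mode the address
 * is deposited as-is.  In both non-64-bit cases the deposit preserves
 * the high half of the output register.
 */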
105
106 static TCGv_i64 psw_addr;
107 static TCGv_i64 psw_mask;
108 static TCGv_i64 gbea;
109
110 static TCGv_i32 cc_op;
111 static TCGv_i64 cc_src;
112 static TCGv_i64 cc_dst;
113 static TCGv_i64 cc_vr;
114
115 static char cpu_reg_names[16][4];
116 static TCGv_i64 regs[16];
117
118 void s390x_translate_init(void)
119 {
120 int i;
121
122 psw_addr = tcg_global_mem_new_i64(cpu_env,
123 offsetof(CPUS390XState, psw.addr),
124 "psw_addr");
125 psw_mask = tcg_global_mem_new_i64(cpu_env,
126 offsetof(CPUS390XState, psw.mask),
127 "psw_mask");
128 gbea = tcg_global_mem_new_i64(cpu_env,
129 offsetof(CPUS390XState, gbea),
130 "gbea");
131
132 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
133 "cc_op");
134 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
135 "cc_src");
136 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
137 "cc_dst");
138 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
139 "cc_vr");
140
141 for (i = 0; i < 16; i++) {
142 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
143 regs[i] = tcg_global_mem_new(cpu_env,
144 offsetof(CPUS390XState, regs[i]),
145 cpu_reg_names[i]);
146 }
147 }
148
149 static inline int vec_full_reg_offset(uint8_t reg)
150 {
151 g_assert(reg < 32);
152 return offsetof(CPUS390XState, vregs[reg][0].d);
153 }
154
155 static inline int vec_reg_offset(uint8_t reg, uint8_t enr, TCGMemOp es)
156 {
157 /* Convert element size (es) - e.g. MO_8 - to bytes */
158 const uint8_t bytes = 1 << es;
159 int offs = enr * bytes;
160
161 /*
162 * vregs[n][0] holds the lowest 8 bytes and vregs[n][1] the highest
163 * 8 bytes of the 16-byte vector, on both little- and big-endian systems.
164 *
165 * Big Endian (target/possible host)
166 * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
167 * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7]
168 * W: [ 0][ 1] - [ 2][ 3]
169 * DW: [ 0] - [ 1]
170 *
171 * Little Endian (possible host)
172 * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
173 * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4]
174 * W: [ 1][ 0] - [ 3][ 2]
175 * DW: [ 0] - [ 1]
176 *
177 * For 16 byte elements, the two 8 byte halves will not form a host
178 * int128 if the host is little endian, since they're in the wrong order.
179 * Some operations (e.g. xor) do not care. For operations like addition,
180 * the two 8 byte elements have to be loaded separately. Let's force all
181 * 16 byte operations to handle it in a special way.
182 */
183 g_assert(es <= MO_64);
184 #ifndef HOST_WORDS_BIGENDIAN
185 offs ^= (8 - bytes);
186 #endif
187 return offs + vec_full_reg_offset(reg);
188 }
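/*
 * Standalone sketch of the little-endian adjustment above (assumed
 * helper, for illustration only, valid on a little-endian host).
 * Example: the big-endian byte offset of word element 1 (es = MO_32)
 * is 4; on a little-endian host that word's bytes live at offsets 0-3
 * of the host doubleword, and indeed 4 ^ (8 - 4) == 0.  The XOR only
 * flips bits within one 8-byte half, so elements never move across
 * the doubleword boundary.
 */
static inline int le_elem_offset_sketch(uint8_t enr, TCGMemOp es)
{
    const uint8_t bytes = 1 << es;   /* element size in bytes */
    int offs = enr * bytes;          /* big-endian byte offset */

    return offs ^ (8 - bytes);       /* host little-endian offset */
}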
189
190 static inline int freg64_offset(uint8_t reg)
191 {
192 g_assert(reg < 16);
193 return vec_reg_offset(reg, 0, MO_64);
194 }
195
196 static inline int freg32_offset(uint8_t reg)
197 {
198 g_assert(reg < 16);
199 return vec_reg_offset(reg, 0, MO_32);
200 }
201
202 static TCGv_i64 load_reg(int reg)
203 {
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_mov_i64(r, regs[reg]);
206 return r;
207 }
208
209 static TCGv_i64 load_freg(int reg)
210 {
211 TCGv_i64 r = tcg_temp_new_i64();
212
213 tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
214 return r;
215 }
216
217 static TCGv_i64 load_freg32_i64(int reg)
218 {
219 TCGv_i64 r = tcg_temp_new_i64();
220
221 tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
222 return r;
223 }
224
225 static void store_reg(int reg, TCGv_i64 v)
226 {
227 tcg_gen_mov_i64(regs[reg], v);
228 }
229
230 static void store_freg(int reg, TCGv_i64 v)
231 {
232 tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
233 }
234
235 static void store_reg32_i64(int reg, TCGv_i64 v)
236 {
237 /* 32 bit register writes keep the upper half */
238 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
239 }
240
241 static void store_reg32h_i64(int reg, TCGv_i64 v)
242 {
243 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
244 }
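/*
 * Illustration (not part of the translator): tcg_gen_deposit_i64(r, r, v,
 * pos, 32) rewrites only a 32-bit slice of r with the low half of v, so
 * the two helpers above implement the architected rule that 32-bit writes
 * to a general register leave the other half untouched.  For example,
 * with r = 0x1111111122222222 and v's low half = 0x33333333,
 * store_reg32_i64 yields 0x1111111133333333 and store_reg32h_i64 yields
 * 0x3333333322222222.
 */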
245
246 static void store_freg32_i64(int reg, TCGv_i64 v)
247 {
248 tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
249 }
250
251 static void return_low128(TCGv_i64 dest)
252 {
253 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
254 }
255
256 static void update_psw_addr(DisasContext *s)
257 {
258 /* psw.addr */
259 tcg_gen_movi_i64(psw_addr, s->base.pc_next);
260 }
261
262 static void per_branch(DisasContext *s, bool to_next)
263 {
264 #ifndef CONFIG_USER_ONLY
265 tcg_gen_movi_i64(gbea, s->base.pc_next);
266
267 if (s->base.tb->flags & FLAG_MASK_PER) {
268 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
269 gen_helper_per_branch(cpu_env, gbea, next_pc);
270 if (to_next) {
271 tcg_temp_free_i64(next_pc);
272 }
273 }
274 #endif
275 }
276
277 static void per_branch_cond(DisasContext *s, TCGCond cond,
278 TCGv_i64 arg1, TCGv_i64 arg2)
279 {
280 #ifndef CONFIG_USER_ONLY
281 if (s->base.tb->flags & FLAG_MASK_PER) {
282 TCGLabel *lab = gen_new_label();
283 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
284
285 tcg_gen_movi_i64(gbea, s->base.pc_next);
286 gen_helper_per_branch(cpu_env, gbea, psw_addr);
287
288 gen_set_label(lab);
289 } else {
290 TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
291 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
292 tcg_temp_free_i64(pc);
293 }
294 #endif
295 }
296
297 static void per_breaking_event(DisasContext *s)
298 {
299 tcg_gen_movi_i64(gbea, s->base.pc_next);
300 }
301
302 static void update_cc_op(DisasContext *s)
303 {
304 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
305 tcg_gen_movi_i32(cc_op, s->cc_op);
306 }
307 }
308
309 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
310 {
311 return (uint64_t)cpu_lduw_code(env, pc);
312 }
313
314 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
315 {
316 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
317 }
318
319 static int get_mem_index(DisasContext *s)
320 {
321 if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
322 return MMU_REAL_IDX;
323 }
324
325 switch (s->base.tb->flags & FLAG_MASK_ASC) {
326 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
327 return MMU_PRIMARY_IDX;
328 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
329 return MMU_SECONDARY_IDX;
330 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
331 return MMU_HOME_IDX;
332 default:
333 tcg_abort();
334 break;
335 }
336 }
337
338 static void gen_exception(int excp)
339 {
340 TCGv_i32 tmp = tcg_const_i32(excp);
341 gen_helper_exception(cpu_env, tmp);
342 tcg_temp_free_i32(tmp);
343 }
344
345 static void gen_program_exception(DisasContext *s, int code)
346 {
347 TCGv_i32 tmp;
348
349 /* Remember what pgm exception this was. */
350 tmp = tcg_const_i32(code);
351 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
352 tcg_temp_free_i32(tmp);
353
354 tmp = tcg_const_i32(s->ilen);
355 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
356 tcg_temp_free_i32(tmp);
357
358 /* update the psw */
359 update_psw_addr(s);
360
361 /* Save off cc. */
362 update_cc_op(s);
363
364 /* Trigger exception. */
365 gen_exception(EXCP_PGM);
366 }
367
368 static inline void gen_illegal_opcode(DisasContext *s)
369 {
370 gen_program_exception(s, PGM_OPERATION);
371 }
372
373 static inline void gen_data_exception(uint8_t dxc)
374 {
375 TCGv_i32 tmp = tcg_const_i32(dxc);
376 gen_helper_data_exception(cpu_env, tmp);
377 tcg_temp_free_i32(tmp);
378 }
379
380 static inline void gen_trap(DisasContext *s)
381 {
382 /* Set DXC to 0xff */
383 gen_data_exception(0xff);
384 }
385
386 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
387 int64_t imm)
388 {
389 tcg_gen_addi_i64(dst, src, imm);
390 if (!(s->base.tb->flags & FLAG_MASK_64)) {
391 if (s->base.tb->flags & FLAG_MASK_32) {
392 tcg_gen_andi_i64(dst, dst, 0x7fffffff);
393 } else {
394 tcg_gen_andi_i64(dst, dst, 0x00ffffff);
395 }
396 }
397 }
398
399 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
400 {
401 TCGv_i64 tmp = tcg_temp_new_i64();
402
403 /*
404 * Note that d2 is limited to 20 bits, signed. If we cropped negative
405 * displacements early, we would create larger immediate addends.
406 */
407 if (b2 && x2) {
408 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
409 gen_addi_and_wrap_i64(s, tmp, tmp, d2);
410 } else if (b2) {
411 gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
412 } else if (x2) {
413 gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
414 } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
415 if (s->base.tb->flags & FLAG_MASK_32) {
416 tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
417 } else {
418 tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
419 }
420 } else {
421 tcg_gen_movi_i64(tmp, d2);
422 }
423
424 return tmp;
425 }
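/*
 * Worked example (assumed operands, purely illustrative): for an
 * instruction whose second operand is d2(x2, b2) = 8(%r2, %r3), the
 * decoder supplies x2 = 2, b2 = 3, d2 = 8, so the code above emits
 * tmp = regs[3] + regs[2] + 8 and then wraps the sum to 31 or 24 bits
 * unless the CPU is in 64-bit addressing mode.
 */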
426
427 static inline bool live_cc_data(DisasContext *s)
428 {
429 return (s->cc_op != CC_OP_DYNAMIC
430 && s->cc_op != CC_OP_STATIC
431 && s->cc_op > 3);
432 }
433
434 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
435 {
436 if (live_cc_data(s)) {
437 tcg_gen_discard_i64(cc_src);
438 tcg_gen_discard_i64(cc_dst);
439 tcg_gen_discard_i64(cc_vr);
440 }
441 s->cc_op = CC_OP_CONST0 + val;
442 }
443
444 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
445 {
446 if (live_cc_data(s)) {
447 tcg_gen_discard_i64(cc_src);
448 tcg_gen_discard_i64(cc_vr);
449 }
450 tcg_gen_mov_i64(cc_dst, dst);
451 s->cc_op = op;
452 }
453
454 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
455 TCGv_i64 dst)
456 {
457 if (live_cc_data(s)) {
458 tcg_gen_discard_i64(cc_vr);
459 }
460 tcg_gen_mov_i64(cc_src, src);
461 tcg_gen_mov_i64(cc_dst, dst);
462 s->cc_op = op;
463 }
464
465 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
466 TCGv_i64 dst, TCGv_i64 vr)
467 {
468 tcg_gen_mov_i64(cc_src, src);
469 tcg_gen_mov_i64(cc_dst, dst);
470 tcg_gen_mov_i64(cc_vr, vr);
471 s->cc_op = op;
472 }
473
474 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
475 {
476 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
477 }
478
479 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
480 {
481 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
482 }
483
484 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
485 {
486 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
487 }
488
489 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
490 {
491 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
492 }
493
494 /* CC value is in env->cc_op */
495 static void set_cc_static(DisasContext *s)
496 {
497 if (live_cc_data(s)) {
498 tcg_gen_discard_i64(cc_src);
499 tcg_gen_discard_i64(cc_dst);
500 tcg_gen_discard_i64(cc_vr);
501 }
502 s->cc_op = CC_OP_STATIC;
503 }
504
505 /* calculates cc into cc_op */
506 static void gen_op_calc_cc(DisasContext *s)
507 {
508 TCGv_i32 local_cc_op = NULL;
509 TCGv_i64 dummy = NULL;
510
511 switch (s->cc_op) {
512 default:
513 dummy = tcg_const_i64(0);
514 /* FALLTHRU */
515 case CC_OP_ADD_64:
516 case CC_OP_ADDU_64:
517 case CC_OP_ADDC_64:
518 case CC_OP_SUB_64:
519 case CC_OP_SUBU_64:
520 case CC_OP_SUBB_64:
521 case CC_OP_ADD_32:
522 case CC_OP_ADDU_32:
523 case CC_OP_ADDC_32:
524 case CC_OP_SUB_32:
525 case CC_OP_SUBU_32:
526 case CC_OP_SUBB_32:
527 local_cc_op = tcg_const_i32(s->cc_op);
528 break;
529 case CC_OP_CONST0:
530 case CC_OP_CONST1:
531 case CC_OP_CONST2:
532 case CC_OP_CONST3:
533 case CC_OP_STATIC:
534 case CC_OP_DYNAMIC:
535 break;
536 }
537
538 switch (s->cc_op) {
539 case CC_OP_CONST0:
540 case CC_OP_CONST1:
541 case CC_OP_CONST2:
542 case CC_OP_CONST3:
543 /* s->cc_op is the cc value */
544 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
545 break;
546 case CC_OP_STATIC:
547 /* env->cc_op already is the cc value */
548 break;
549 case CC_OP_NZ:
550 case CC_OP_ABS_64:
551 case CC_OP_NABS_64:
552 case CC_OP_ABS_32:
553 case CC_OP_NABS_32:
554 case CC_OP_LTGT0_32:
555 case CC_OP_LTGT0_64:
556 case CC_OP_COMP_32:
557 case CC_OP_COMP_64:
558 case CC_OP_NZ_F32:
559 case CC_OP_NZ_F64:
560 case CC_OP_FLOGR:
561 case CC_OP_LCBB:
562 /* 1 argument */
563 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
564 break;
565 case CC_OP_ICM:
566 case CC_OP_LTGT_32:
567 case CC_OP_LTGT_64:
568 case CC_OP_LTUGTU_32:
569 case CC_OP_LTUGTU_64:
570 case CC_OP_TM_32:
571 case CC_OP_TM_64:
572 case CC_OP_SLA_32:
573 case CC_OP_SLA_64:
574 case CC_OP_NZ_F128:
575 /* 2 arguments */
576 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
577 break;
578 case CC_OP_ADD_64:
579 case CC_OP_ADDU_64:
580 case CC_OP_ADDC_64:
581 case CC_OP_SUB_64:
582 case CC_OP_SUBU_64:
583 case CC_OP_SUBB_64:
584 case CC_OP_ADD_32:
585 case CC_OP_ADDU_32:
586 case CC_OP_ADDC_32:
587 case CC_OP_SUB_32:
588 case CC_OP_SUBU_32:
589 case CC_OP_SUBB_32:
590 /* 3 arguments */
591 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
592 break;
593 case CC_OP_DYNAMIC:
594 /* unknown operation - assume 3 arguments and cc_op in env */
595 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
596 break;
597 default:
598 tcg_abort();
599 }
600
601 if (local_cc_op) {
602 tcg_temp_free_i32(local_cc_op);
603 }
604 if (dummy) {
605 tcg_temp_free_i64(dummy);
606 }
607
608 /* We now have cc in cc_op as constant */
609 set_cc_static(s);
610 }
611
612 static bool use_exit_tb(DisasContext *s)
613 {
614 return s->base.singlestep_enabled ||
615 (tb_cflags(s->base.tb) & CF_LAST_IO) ||
616 (s->base.tb->flags & FLAG_MASK_PER);
617 }
618
619 static bool use_goto_tb(DisasContext *s, uint64_t dest)
620 {
621 if (unlikely(use_exit_tb(s))) {
622 return false;
623 }
624 #ifndef CONFIG_USER_ONLY
625 return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
626 (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
627 #else
628 return true;
629 #endif
630 }
631
632 static void account_noninline_branch(DisasContext *s, int cc_op)
633 {
634 #ifdef DEBUG_INLINE_BRANCHES
635 inline_branch_miss[cc_op]++;
636 #endif
637 }
638
639 static void account_inline_branch(DisasContext *s, int cc_op)
640 {
641 #ifdef DEBUG_INLINE_BRANCHES
642 inline_branch_hit[cc_op]++;
643 #endif
644 }
645
646 /* Table of mask values to comparison codes, given a comparison as input.
647 For a comparison, CC=3 is not possible. */
648 static const TCGCond ltgt_cond[16] = {
649 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
650 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
651 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
652 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
653 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
654 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
655 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
656 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
657 };
658
659 /* Table of mask values to comparison codes, given a logic op as input.
660 For a logic op, only CC=0 and CC=1 are possible. */
661 static const TCGCond nz_cond[16] = {
662 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
663 TCG_COND_NEVER, TCG_COND_NEVER,
664 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
665 TCG_COND_NE, TCG_COND_NE,
666 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
667 TCG_COND_EQ, TCG_COND_EQ,
668 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
669 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
670 };
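/*
 * Indexing sketch for the two tables above: the 4-bit branch mask
 * selects CC values left to right, i.e. bit 8 -> CC0, bit 4 -> CC1,
 * bit 2 -> CC2 and bit 1 -> CC3.  After a signed comparison,
 * CC0/CC1/CC2 mean equal/low/high, so e.g. "branch on equal or high"
 * uses mask 8 | 2 = 10 and ltgt_cond[10] is TCG_COND_GE.  Entries come
 * in pairs because CC3 cannot occur and bit 1 is therefore a
 * don't-care.
 */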
671
672 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
673 details required to generate a TCG comparison. */
674 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
675 {
676 TCGCond cond;
677 enum cc_op old_cc_op = s->cc_op;
678
679 if (mask == 15 || mask == 0) {
680 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
681 c->u.s32.a = cc_op;
682 c->u.s32.b = cc_op;
683 c->g1 = c->g2 = true;
684 c->is_64 = false;
685 return;
686 }
687
688 /* Find the TCG condition for the mask + cc op. */
689 switch (old_cc_op) {
690 case CC_OP_LTGT0_32:
691 case CC_OP_LTGT0_64:
692 case CC_OP_LTGT_32:
693 case CC_OP_LTGT_64:
694 cond = ltgt_cond[mask];
695 if (cond == TCG_COND_NEVER) {
696 goto do_dynamic;
697 }
698 account_inline_branch(s, old_cc_op);
699 break;
700
701 case CC_OP_LTUGTU_32:
702 case CC_OP_LTUGTU_64:
703 cond = tcg_unsigned_cond(ltgt_cond[mask]);
704 if (cond == TCG_COND_NEVER) {
705 goto do_dynamic;
706 }
707 account_inline_branch(s, old_cc_op);
708 break;
709
710 case CC_OP_NZ:
711 cond = nz_cond[mask];
712 if (cond == TCG_COND_NEVER) {
713 goto do_dynamic;
714 }
715 account_inline_branch(s, old_cc_op);
716 break;
717
718 case CC_OP_TM_32:
719 case CC_OP_TM_64:
720 switch (mask) {
721 case 8:
722 cond = TCG_COND_EQ;
723 break;
724 case 4 | 2 | 1:
725 cond = TCG_COND_NE;
726 break;
727 default:
728 goto do_dynamic;
729 }
730 account_inline_branch(s, old_cc_op);
731 break;
732
733 case CC_OP_ICM:
734 switch (mask) {
735 case 8:
736 cond = TCG_COND_EQ;
737 break;
738 case 4 | 2 | 1:
739 case 4 | 2:
740 cond = TCG_COND_NE;
741 break;
742 default:
743 goto do_dynamic;
744 }
745 account_inline_branch(s, old_cc_op);
746 break;
747
748 case CC_OP_FLOGR:
749 switch (mask & 0xa) {
750 case 8: /* src == 0 -> no one bit found */
751 cond = TCG_COND_EQ;
752 break;
753 case 2: /* src != 0 -> one bit found */
754 cond = TCG_COND_NE;
755 break;
756 default:
757 goto do_dynamic;
758 }
759 account_inline_branch(s, old_cc_op);
760 break;
761
762 case CC_OP_ADDU_32:
763 case CC_OP_ADDU_64:
764 switch (mask) {
765 case 8 | 2: /* vr == 0 */
766 cond = TCG_COND_EQ;
767 break;
768 case 4 | 1: /* vr != 0 */
769 cond = TCG_COND_NE;
770 break;
771 case 8 | 4: /* no carry -> vr >= src */
772 cond = TCG_COND_GEU;
773 break;
774 case 2 | 1: /* carry -> vr < src */
775 cond = TCG_COND_LTU;
776 break;
777 default:
778 goto do_dynamic;
779 }
780 account_inline_branch(s, old_cc_op);
781 break;
782
783 case CC_OP_SUBU_32:
784 case CC_OP_SUBU_64:
785 /* Note that CC=0 is impossible; treat it as a don't-care. */
786 switch (mask & 7) {
787 case 2: /* zero -> op1 == op2 */
788 cond = TCG_COND_EQ;
789 break;
790 case 4 | 1: /* !zero -> op1 != op2 */
791 cond = TCG_COND_NE;
792 break;
793 case 4: /* borrow (!carry) -> op1 < op2 */
794 cond = TCG_COND_LTU;
795 break;
796 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
797 cond = TCG_COND_GEU;
798 break;
799 default:
800 goto do_dynamic;
801 }
802 account_inline_branch(s, old_cc_op);
803 break;
804
805 default:
806 do_dynamic:
807 /* Calculate cc value. */
808 gen_op_calc_cc(s);
809 /* FALLTHRU */
810
811 case CC_OP_STATIC:
812 /* Jump based on CC. We'll load up the real cond below;
813 the assignment here merely avoids a compiler warning. */
814 account_noninline_branch(s, old_cc_op);
815 old_cc_op = CC_OP_STATIC;
816 cond = TCG_COND_NEVER;
817 break;
818 }
819
820 /* Load up the arguments of the comparison. */
821 c->is_64 = true;
822 c->g1 = c->g2 = false;
823 switch (old_cc_op) {
824 case CC_OP_LTGT0_32:
825 c->is_64 = false;
826 c->u.s32.a = tcg_temp_new_i32();
827 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
828 c->u.s32.b = tcg_const_i32(0);
829 break;
830 case CC_OP_LTGT_32:
831 case CC_OP_LTUGTU_32:
832 case CC_OP_SUBU_32:
833 c->is_64 = false;
834 c->u.s32.a = tcg_temp_new_i32();
835 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
836 c->u.s32.b = tcg_temp_new_i32();
837 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
838 break;
839
840 case CC_OP_LTGT0_64:
841 case CC_OP_NZ:
842 case CC_OP_FLOGR:
843 c->u.s64.a = cc_dst;
844 c->u.s64.b = tcg_const_i64(0);
845 c->g1 = true;
846 break;
847 case CC_OP_LTGT_64:
848 case CC_OP_LTUGTU_64:
849 case CC_OP_SUBU_64:
850 c->u.s64.a = cc_src;
851 c->u.s64.b = cc_dst;
852 c->g1 = c->g2 = true;
853 break;
854
855 case CC_OP_TM_32:
856 case CC_OP_TM_64:
857 case CC_OP_ICM:
858 c->u.s64.a = tcg_temp_new_i64();
859 c->u.s64.b = tcg_const_i64(0);
860 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
861 break;
862
863 case CC_OP_ADDU_32:
864 c->is_64 = false;
865 c->u.s32.a = tcg_temp_new_i32();
866 c->u.s32.b = tcg_temp_new_i32();
867 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
868 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
869 tcg_gen_movi_i32(c->u.s32.b, 0);
870 } else {
871 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
872 }
873 break;
874
875 case CC_OP_ADDU_64:
876 c->u.s64.a = cc_vr;
877 c->g1 = true;
878 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
879 c->u.s64.b = tcg_const_i64(0);
880 } else {
881 c->u.s64.b = cc_src;
882 c->g2 = true;
883 }
884 break;
885
886 case CC_OP_STATIC:
887 c->is_64 = false;
888 c->u.s32.a = cc_op;
889 c->g1 = true;
890 switch (mask) {
891 case 0x8 | 0x4 | 0x2: /* cc != 3 */
892 cond = TCG_COND_NE;
893 c->u.s32.b = tcg_const_i32(3);
894 break;
895 case 0x8 | 0x4 | 0x1: /* cc != 2 */
896 cond = TCG_COND_NE;
897 c->u.s32.b = tcg_const_i32(2);
898 break;
899 case 0x8 | 0x2 | 0x1: /* cc != 1 */
900 cond = TCG_COND_NE;
901 c->u.s32.b = tcg_const_i32(1);
902 break;
903 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
904 cond = TCG_COND_EQ;
905 c->g1 = false;
906 c->u.s32.a = tcg_temp_new_i32();
907 c->u.s32.b = tcg_const_i32(0);
908 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
909 break;
910 case 0x8 | 0x4: /* cc < 2 */
911 cond = TCG_COND_LTU;
912 c->u.s32.b = tcg_const_i32(2);
913 break;
914 case 0x8: /* cc == 0 */
915 cond = TCG_COND_EQ;
916 c->u.s32.b = tcg_const_i32(0);
917 break;
918 case 0x4 | 0x2 | 0x1: /* cc != 0 */
919 cond = TCG_COND_NE;
920 c->u.s32.b = tcg_const_i32(0);
921 break;
922 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
923 cond = TCG_COND_NE;
924 c->g1 = false;
925 c->u.s32.a = tcg_temp_new_i32();
926 c->u.s32.b = tcg_const_i32(0);
927 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
928 break;
929 case 0x4: /* cc == 1 */
930 cond = TCG_COND_EQ;
931 c->u.s32.b = tcg_const_i32(1);
932 break;
933 case 0x2 | 0x1: /* cc > 1 */
934 cond = TCG_COND_GTU;
935 c->u.s32.b = tcg_const_i32(1);
936 break;
937 case 0x2: /* cc == 2 */
938 cond = TCG_COND_EQ;
939 c->u.s32.b = tcg_const_i32(2);
940 break;
941 case 0x1: /* cc == 3 */
942 cond = TCG_COND_EQ;
943 c->u.s32.b = tcg_const_i32(3);
944 break;
945 default:
946 /* CC is masked by something else: (8 >> cc) & mask. */
947 cond = TCG_COND_NE;
948 c->g1 = false;
949 c->u.s32.a = tcg_const_i32(8);
950 c->u.s32.b = tcg_const_i32(0);
951 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
952 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
953 break;
954 }
955 break;
956
957 default:
958 abort();
959 }
960 c->cond = cond;
961 }
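/*
 * Worked example for the generic masked case above (illustrative):
 * with CC=1 and mask = 0x4, (8 >> cc) turns the CC into the one-hot
 * value 0b0100; ANDing with the mask leaves it non-zero, so the
 * TCG_COND_NE test against 0 implements "branch if the mask bit for
 * the current CC is set".
 */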
962
963 static void free_compare(DisasCompare *c)
964 {
965 if (!c->g1) {
966 if (c->is_64) {
967 tcg_temp_free_i64(c->u.s64.a);
968 } else {
969 tcg_temp_free_i32(c->u.s32.a);
970 }
971 }
972 if (!c->g2) {
973 if (c->is_64) {
974 tcg_temp_free_i64(c->u.s64.b);
975 } else {
976 tcg_temp_free_i32(c->u.s32.b);
977 }
978 }
979 }
980
981 /* ====================================================================== */
982 /* Define the insn format enumeration. */
983 #define F0(N) FMT_##N,
984 #define F1(N, X1) F0(N)
985 #define F2(N, X1, X2) F0(N)
986 #define F3(N, X1, X2, X3) F0(N)
987 #define F4(N, X1, X2, X3, X4) F0(N)
988 #define F5(N, X1, X2, X3, X4, X5) F0(N)
989 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
990
991 typedef enum {
992 #include "insn-format.def"
993 } DisasFormat;
994
995 #undef F0
996 #undef F1
997 #undef F2
998 #undef F3
999 #undef F4
1000 #undef F5
1001 #undef F6
1002
1003 /* Define a structure to hold the decoded fields. We'll store each inside
1004 an array indexed by an enum. In order to conserve memory, we'll arrange
1005 for fields that do not exist at the same time to overlap, thus the "C"
1006 for compact. For checking purposes there is an "O" for original index
1007 as well that will be applied to availability bitmaps. */
1008
1009 enum DisasFieldIndexO {
1010 FLD_O_r1,
1011 FLD_O_r2,
1012 FLD_O_r3,
1013 FLD_O_m1,
1014 FLD_O_m3,
1015 FLD_O_m4,
1016 FLD_O_m5,
1017 FLD_O_m6,
1018 FLD_O_b1,
1019 FLD_O_b2,
1020 FLD_O_b4,
1021 FLD_O_d1,
1022 FLD_O_d2,
1023 FLD_O_d4,
1024 FLD_O_x2,
1025 FLD_O_l1,
1026 FLD_O_l2,
1027 FLD_O_i1,
1028 FLD_O_i2,
1029 FLD_O_i3,
1030 FLD_O_i4,
1031 FLD_O_i5,
1032 FLD_O_v1,
1033 FLD_O_v2,
1034 FLD_O_v3,
1035 FLD_O_v4,
1036 };
1037
1038 enum DisasFieldIndexC {
1039 FLD_C_r1 = 0,
1040 FLD_C_m1 = 0,
1041 FLD_C_b1 = 0,
1042 FLD_C_i1 = 0,
1043 FLD_C_v1 = 0,
1044
1045 FLD_C_r2 = 1,
1046 FLD_C_b2 = 1,
1047 FLD_C_i2 = 1,
1048
1049 FLD_C_r3 = 2,
1050 FLD_C_m3 = 2,
1051 FLD_C_i3 = 2,
1052 FLD_C_v3 = 2,
1053
1054 FLD_C_m4 = 3,
1055 FLD_C_b4 = 3,
1056 FLD_C_i4 = 3,
1057 FLD_C_l1 = 3,
1058 FLD_C_v4 = 3,
1059
1060 FLD_C_i5 = 4,
1061 FLD_C_d1 = 4,
1062 FLD_C_m5 = 4,
1063
1064 FLD_C_d2 = 5,
1065 FLD_C_m6 = 5,
1066
1067 FLD_C_d4 = 6,
1068 FLD_C_x2 = 6,
1069 FLD_C_l2 = 6,
1070 FLD_C_v2 = 6,
1071
1072 NUM_C_FIELD = 7
1073 };
1074
1075 struct DisasFields {
1076 uint64_t raw_insn;
1077 unsigned op:8;
1078 unsigned op2:8;
1079 unsigned presentC:16;
1080 unsigned int presentO;
1081 int c[NUM_C_FIELD];
1082 };
1083
1084 /* This is the way fields are to be accessed out of DisasFields. */
1085 #define have_field(S, F) have_field1((S), FLD_O_##F)
1086 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1087
1088 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1089 {
1090 return (f->presentO >> c) & 1;
1091 }
1092
1093 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1094 enum DisasFieldIndexC c)
1095 {
1096 assert(have_field1(f, o));
1097 return f->c[c];
1098 }
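/*
 * Usage sketch (illustrative): the compact aliasing is safe because no
 * single format uses two fields that share a slot, e.g. FLD_C_x2,
 * FLD_C_d4, FLD_C_l2 and FLD_C_v2 all map to slot 6.
 *
 *     if (have_field(f, x2)) {
 *         int x2 = get_field(f, x2);   // reads f->c[FLD_C_x2] == f->c[6]
 *     }
 */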
1099
1100 /* Describe the layout of each field in each format. */
1101 typedef struct DisasField {
1102 unsigned int beg:8;
1103 unsigned int size:8;
1104 unsigned int type:2;
1105 unsigned int indexC:6;
1106 enum DisasFieldIndexO indexO:8;
1107 } DisasField;
1108
1109 typedef struct DisasFormatInfo {
1110 DisasField op[NUM_C_FIELD];
1111 } DisasFormatInfo;
1112
1113 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1114 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1115 #define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
1116 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1117 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1118 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1119 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1120 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1121 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1122 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1123 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1124 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1125 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1126 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1127 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1128
1129 #define F0(N) { { } },
1130 #define F1(N, X1) { { X1 } },
1131 #define F2(N, X1, X2) { { X1, X2 } },
1132 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1133 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1134 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1135 #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
1136
1137 static const DisasFormatInfo format_info[] = {
1138 #include "insn-format.def"
1139 };
1140
1141 #undef F0
1142 #undef F1
1143 #undef F2
1144 #undef F3
1145 #undef F4
1146 #undef F5
1147 #undef F6
1148 #undef R
1149 #undef M
1150 #undef V
1151 #undef BD
1152 #undef BXD
1153 #undef BDL
1154 #undef BXDL
1155 #undef I
1156 #undef L
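/*
 * Expansion sketch (hypothetical entry; the real definitions live in
 * insn-format.def): a format declared as F2(RX_x, R(1, 8), BXD(2))
 * would expand, via the macros above, to
 *
 *     { { {  8,  4, 0, FLD_C_r1, FLD_O_r1 },
 *         { 16,  4, 0, FLD_C_b2, FLD_O_b2 },
 *         { 12,  4, 0, FLD_C_x2, FLD_O_x2 },
 *         { 20, 12, 0, FLD_C_d2, FLD_O_d2 } } },
 *
 * i.e. one DisasField per operand field giving its bit position, width,
 * extraction type and the compact/original indices used by get_field().
 */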
1157
1158 /* Generally, we'll extract operands into these structures, operate upon
1159 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1160 of routines below for more details. */
1161 typedef struct {
1162 bool g_out, g_out2, g_in1, g_in2;
1163 TCGv_i64 out, out2, in1, in2;
1164 TCGv_i64 addr1;
1165 } DisasOps;
1166
1167 /* Instructions can place constraints on their operands, raising specification
1168 exceptions if they are violated. To make this easy to automate, each "in1",
1169 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1170 of the following, or 0. To make this easy to document, we'll put the
1171 SPEC_<name> defines next to <name>. */
1172
1173 #define SPEC_r1_even 1
1174 #define SPEC_r2_even 2
1175 #define SPEC_r3_even 4
1176 #define SPEC_r1_f128 8
1177 #define SPEC_r2_f128 16
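/*
 * Sketch of how a constraint would be enforced (hypothetical check, not
 * the actual decode loop): an instruction tagged SPEC_r1_even must name
 * an even register in r1, typically because it addresses a register
 * pair.
 *
 *     if ((insn->spec & SPEC_r1_even) && (get_field(f, r1) & 1)) {
 *         gen_program_exception(s, PGM_SPECIFICATION);
 *     }
 */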
1178
1179 /* Return values from translate_one, indicating the state of the TB. */
1180
1181 /* We are not using a goto_tb (for whatever reason), but have updated
1182 the PC (for whatever reason), so there's no need to do it again on
1183 exiting the TB. */
1184 #define DISAS_PC_UPDATED DISAS_TARGET_0
1185
1186 /* We have emitted one or more goto_tb. No fixup required. */
1187 #define DISAS_GOTO_TB DISAS_TARGET_1
1188
1189 /* We have updated the PC and CC values. */
1190 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1191
1192 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1193 updated the PC for the next instruction to be executed. */
1194 #define DISAS_PC_STALE DISAS_TARGET_3
1195
1196 /* We are exiting the TB to the main loop. */
1197 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
1198
1199
1200 /* Instruction flags */
1201 #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
1202 #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
1203 #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
1204 #define IF_BFP 0x0008 /* binary floating point instruction */
1205 #define IF_DFP 0x0010 /* decimal floating point instruction */
1206 #define IF_PRIV 0x0020 /* privileged instruction */
1207 #define IF_VEC 0x0040 /* vector instruction */
1208
1209 struct DisasInsn {
1210 unsigned opc:16;
1211 unsigned flags:16;
1212 DisasFormat fmt:8;
1213 unsigned fac:8;
1214 unsigned spec:8;
1215
1216 const char *name;
1217
1218 /* Pre-process arguments before HELP_OP. */
1219 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1220 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1221 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1222
1223 /*
1224 * Post-process output after HELP_OP.
1225 * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1226 */
1227 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1228 void (*help_cout)(DisasContext *, DisasOps *);
1229
1230 /* Implement the operation itself. */
1231 DisasJumpType (*help_op)(DisasContext *, DisasOps *);
1232
1233 uint64_t data;
1234 };
1235
1236 /* ====================================================================== */
1237 /* Miscellaneous helpers, used by several operations. */
1238
1239 static void help_l2_shift(DisasContext *s, DisasFields *f,
1240 DisasOps *o, int mask)
1241 {
1242 int b2 = get_field(f, b2);
1243 int d2 = get_field(f, d2);
1244
1245 if (b2 == 0) {
1246 o->in2 = tcg_const_i64(d2 & mask);
1247 } else {
1248 o->in2 = get_address(s, 0, b2, d2);
1249 tcg_gen_andi_i64(o->in2, o->in2, mask);
1250 }
1251 }
1252
1253 static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
1254 {
1255 if (dest == s->pc_tmp) {
1256 per_branch(s, true);
1257 return DISAS_NEXT;
1258 }
1259 if (use_goto_tb(s, dest)) {
1260 update_cc_op(s);
1261 per_breaking_event(s);
1262 tcg_gen_goto_tb(0);
1263 tcg_gen_movi_i64(psw_addr, dest);
1264 tcg_gen_exit_tb(s->base.tb, 0);
1265 return DISAS_GOTO_TB;
1266 } else {
1267 tcg_gen_movi_i64(psw_addr, dest);
1268 per_branch(s, false);
1269 return DISAS_PC_UPDATED;
1270 }
1271 }
1272
1273 static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
1274 bool is_imm, int imm, TCGv_i64 cdest)
1275 {
1276 DisasJumpType ret;
1277 uint64_t dest = s->base.pc_next + 2 * imm;
1278 TCGLabel *lab;
1279
1280 /* Take care of the special cases first. */
1281 if (c->cond == TCG_COND_NEVER) {
1282 ret = DISAS_NEXT;
1283 goto egress;
1284 }
1285 if (is_imm) {
1286 if (dest == s->pc_tmp) {
1287 /* Branch to next. */
1288 per_branch(s, true);
1289 ret = DISAS_NEXT;
1290 goto egress;
1291 }
1292 if (c->cond == TCG_COND_ALWAYS) {
1293 ret = help_goto_direct(s, dest);
1294 goto egress;
1295 }
1296 } else {
1297 if (!cdest) {
1298 /* E.g. bcr %r0 -> no branch. */
1299 ret = DISAS_NEXT;
1300 goto egress;
1301 }
1302 if (c->cond == TCG_COND_ALWAYS) {
1303 tcg_gen_mov_i64(psw_addr, cdest);
1304 per_branch(s, false);
1305 ret = DISAS_PC_UPDATED;
1306 goto egress;
1307 }
1308 }
1309
1310 if (use_goto_tb(s, s->pc_tmp)) {
1311 if (is_imm && use_goto_tb(s, dest)) {
1312 /* Both exits can use goto_tb. */
1313 update_cc_op(s);
1314
1315 lab = gen_new_label();
1316 if (c->is_64) {
1317 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1318 } else {
1319 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1320 }
1321
1322 /* Branch not taken. */
1323 tcg_gen_goto_tb(0);
1324 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
1325 tcg_gen_exit_tb(s->base.tb, 0);
1326
1327 /* Branch taken. */
1328 gen_set_label(lab);
1329 per_breaking_event(s);
1330 tcg_gen_goto_tb(1);
1331 tcg_gen_movi_i64(psw_addr, dest);
1332 tcg_gen_exit_tb(s->base.tb, 1);
1333
1334 ret = DISAS_GOTO_TB;
1335 } else {
1336 /* Fallthru can use goto_tb, but taken branch cannot. */
1337 /* Store taken branch destination before the brcond. This
1338 avoids having to allocate a new local temp to hold it.
1339 We'll overwrite this in the not taken case anyway. */
1340 if (!is_imm) {
1341 tcg_gen_mov_i64(psw_addr, cdest);
1342 }
1343
1344 lab = gen_new_label();
1345 if (c->is_64) {
1346 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1347 } else {
1348 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1349 }
1350
1351 /* Branch not taken. */
1352 update_cc_op(s);
1353 tcg_gen_goto_tb(0);
1354 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
1355 tcg_gen_exit_tb(s->base.tb, 0);
1356
1357 gen_set_label(lab);
1358 if (is_imm) {
1359 tcg_gen_movi_i64(psw_addr, dest);
1360 }
1361 per_breaking_event(s);
1362 ret = DISAS_PC_UPDATED;
1363 }
1364 } else {
1365 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1366 Most commonly we're single-stepping or some other condition that
1367 disables all use of goto_tb. Just update the PC and exit. */
1368
1369 TCGv_i64 next = tcg_const_i64(s->pc_tmp);
1370 if (is_imm) {
1371 cdest = tcg_const_i64(dest);
1372 }
1373
1374 if (c->is_64) {
1375 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1376 cdest, next);
1377 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1378 } else {
1379 TCGv_i32 t0 = tcg_temp_new_i32();
1380 TCGv_i64 t1 = tcg_temp_new_i64();
1381 TCGv_i64 z = tcg_const_i64(0);
1382 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1383 tcg_gen_extu_i32_i64(t1, t0);
1384 tcg_temp_free_i32(t0);
1385 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1386 per_branch_cond(s, TCG_COND_NE, t1, z);
1387 tcg_temp_free_i64(t1);
1388 tcg_temp_free_i64(z);
1389 }
1390
1391 if (is_imm) {
1392 tcg_temp_free_i64(cdest);
1393 }
1394 tcg_temp_free_i64(next);
1395
1396 ret = DISAS_PC_UPDATED;
1397 }
1398
1399 egress:
1400 free_compare(c);
1401 return ret;
1402 }
1403
1404 /* ====================================================================== */
1405 /* The operations. These perform the bulk of the work for any insn,
1406 usually after the operands have been loaded and output initialized. */
1407
1408 static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
1409 {
1410 tcg_gen_abs_i64(o->out, o->in2);
1411 return DISAS_NEXT;
1412 }
1413
1414 static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
1415 {
1416 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1417 return DISAS_NEXT;
1418 }
1419
1420 static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
1421 {
1422 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1423 return DISAS_NEXT;
1424 }
1425
1426 static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
1427 {
1428 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1429 tcg_gen_mov_i64(o->out2, o->in2);
1430 return DISAS_NEXT;
1431 }
1432
1433 static DisasJumpType op_add(DisasContext *s, DisasOps *o)
1434 {
1435 tcg_gen_add_i64(o->out, o->in1, o->in2);
1436 return DISAS_NEXT;
1437 }
1438
1439 static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
1440 {
1441 DisasCompare cmp;
1442 TCGv_i64 carry;
1443
1444 tcg_gen_add_i64(o->out, o->in1, o->in2);
1445
1446 /* The carry flag is the msb of CC, therefore the branch mask that would
1447 create that comparison is 3. Feeding the generated comparison to
1448 setcond produces the carry flag that we desire. */
1449 disas_jcc(s, &cmp, 3);
1450 carry = tcg_temp_new_i64();
1451 if (cmp.is_64) {
1452 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1453 } else {
1454 TCGv_i32 t = tcg_temp_new_i32();
1455 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1456 tcg_gen_extu_i32_i64(carry, t);
1457 tcg_temp_free_i32(t);
1458 }
1459 free_compare(&cmp);
1460
1461 tcg_gen_add_i64(o->out, o->out, carry);
1462 tcg_temp_free_i64(carry);
1463 return DISAS_NEXT;
1464 }
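/*
 * Worked example for the mask-3 trick above (illustrative): for
 * CC_OP_ADDU_*, a carry leaves CC at 2 or 3, exactly the values with
 * the high CC bit set.  Since the mask selects CC0-CC3 via bits 8, 4,
 * 2 and 1, mask 2 | 1 = 3 means "CC >= 2", which disas_jcc() reduces
 * to a single TCG_COND_LTU test of cc_vr against cc_src (result <
 * operand implies the addition wrapped).
 */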
1465
1466 static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
1467 {
1468 o->in1 = tcg_temp_new_i64();
1469
1470 if (!s390_has_feat(S390_FEAT_STFLE_45)) {
1471 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1472 } else {
1473 /* Perform the atomic addition in memory. */
1474 tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1475 s->insn->data);
1476 }
1477
1478 /* Recompute also for atomic case: needed for setting CC. */
1479 tcg_gen_add_i64(o->out, o->in1, o->in2);
1480
1481 if (!s390_has_feat(S390_FEAT_STFLE_45)) {
1482 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1483 }
1484 return DISAS_NEXT;
1485 }
1486
1487 static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
1488 {
1489 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1490 return DISAS_NEXT;
1491 }
1492
1493 static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
1494 {
1495 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1496 return DISAS_NEXT;
1497 }
1498
1499 static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
1500 {
1501 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1502 return_low128(o->out2);
1503 return DISAS_NEXT;
1504 }
1505
1506 static DisasJumpType op_and(DisasContext *s, DisasOps *o)
1507 {
1508 tcg_gen_and_i64(o->out, o->in1, o->in2);
1509 return DISAS_NEXT;
1510 }
1511
1512 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1513 {
1514 int shift = s->insn->data & 0xff;
1515 int size = s->insn->data >> 8;
1516 uint64_t mask = ((1ull << size) - 1) << shift;
1517
1518 assert(!o->g_in2);
1519 tcg_gen_shli_i64(o->in2, o->in2, shift);
1520 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1521 tcg_gen_and_i64(o->out, o->in1, o->in2);
1522
1523 /* Produce the CC from only the bits manipulated. */
1524 tcg_gen_andi_i64(cc_dst, o->out, mask);
1525 set_cc_nz_u64(s, cc_dst);
1526 return DISAS_NEXT;
1527 }
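/*
 * Worked example (assumed encoding, matching the unpacking above): for
 * an AND of the high-high halfword (NIHH-style), insn->data would hold
 * size = 16 and shift = 48, giving mask = 0xffff << 48.  The immediate
 * is shifted into place, all untouched bit positions are forced to 1
 * via ~mask so the AND preserves them, and the CC is computed from the
 * manipulated bits only.
 */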
1528
1529 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1530 {
1531 o->in1 = tcg_temp_new_i64();
1532
1533 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1534 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1535 } else {
1536 /* Perform the atomic operation in memory. */
1537 tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1538 s->insn->data);
1539 }
1540
1541 /* Recompute also for atomic case: needed for setting CC. */
1542 tcg_gen_and_i64(o->out, o->in1, o->in2);
1543
1544 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1545 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1546 }
1547 return DISAS_NEXT;
1548 }
1549
1550 static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1551 {
1552 pc_to_link_info(o->out, s, s->pc_tmp);
1553 if (o->in2) {
1554 tcg_gen_mov_i64(psw_addr, o->in2);
1555 per_branch(s, false);
1556 return DISAS_PC_UPDATED;
1557 } else {
1558 return DISAS_NEXT;
1559 }
1560 }
1561
1562 static void save_link_info(DisasContext *s, DisasOps *o)
1563 {
1564 TCGv_i64 t;
1565
1566 if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
1567 pc_to_link_info(o->out, s, s->pc_tmp);
1568 return;
1569 }
1570 gen_op_calc_cc(s);
1571 tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
1572 tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
1573 t = tcg_temp_new_i64();
1574 tcg_gen_shri_i64(t, psw_mask, 16);
1575 tcg_gen_andi_i64(t, t, 0x0f000000);
1576 tcg_gen_or_i64(o->out, o->out, t);
1577 tcg_gen_extu_i32_i64(t, cc_op);
1578 tcg_gen_shli_i64(t, t, 28);
1579 tcg_gen_or_i64(o->out, o->out, t);
1580 tcg_temp_free_i64(t);
1581 }
1582
1583 static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1584 {
1585 save_link_info(s, o);
1586 if (o->in2) {
1587 tcg_gen_mov_i64(psw_addr, o->in2);
1588 per_branch(s, false);
1589 return DISAS_PC_UPDATED;
1590 } else {
1591 return DISAS_NEXT;
1592 }
1593 }
1594
1595 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1596 {
1597 pc_to_link_info(o->out, s, s->pc_tmp);
1598 return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
1599 }
1600
1601 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1602 {
1603 int m1 = get_field(s->fields, m1);
1604 bool is_imm = have_field(s->fields, i2);
1605 int imm = is_imm ? get_field(s->fields, i2) : 0;
1606 DisasCompare c;
1607
1608 /* BCR with R2 = 0 causes no branching */
1609 if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1610 if (m1 == 14) {
1611 /* Perform serialization */
1612 /* FIXME: check for fast-BCR-serialization facility */
1613 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1614 }
1615 if (m1 == 15) {
1616 /* Perform serialization */
1617 /* FIXME: perform checkpoint-synchronisation */
1618 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1619 }
1620 return DISAS_NEXT;
1621 }
1622
1623 disas_jcc(s, &c, m1);
1624 return help_branch(s, &c, is_imm, imm, o->in2);
1625 }
1626
1627 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1628 {
1629 int r1 = get_field(s->fields, r1);
1630 bool is_imm = have_field(s->fields, i2);
1631 int imm = is_imm ? get_field(s->fields, i2) : 0;
1632 DisasCompare c;
1633 TCGv_i64 t;
1634
1635 c.cond = TCG_COND_NE;
1636 c.is_64 = false;
1637 c.g1 = false;
1638 c.g2 = false;
1639
1640 t = tcg_temp_new_i64();
1641 tcg_gen_subi_i64(t, regs[r1], 1);
1642 store_reg32_i64(r1, t);
1643 c.u.s32.a = tcg_temp_new_i32();
1644 c.u.s32.b = tcg_const_i32(0);
1645 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1646 tcg_temp_free_i64(t);
1647
1648 return help_branch(s, &c, is_imm, imm, o->in2);
1649 }
1650
1651 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1652 {
1653 int r1 = get_field(s->fields, r1);
1654 int imm = get_field(s->fields, i2);
1655 DisasCompare c;
1656 TCGv_i64 t;
1657
1658 c.cond = TCG_COND_NE;
1659 c.is_64 = false;
1660 c.g1 = false;
1661 c.g2 = false;
1662
1663 t = tcg_temp_new_i64();
1664 tcg_gen_shri_i64(t, regs[r1], 32);
1665 tcg_gen_subi_i64(t, t, 1);
1666 store_reg32h_i64(r1, t);
1667 c.u.s32.a = tcg_temp_new_i32();
1668 c.u.s32.b = tcg_const_i32(0);
1669 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1670 tcg_temp_free_i64(t);
1671
1672 return help_branch(s, &c, 1, imm, o->in2);
1673 }
1674
1675 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1676 {
1677 int r1 = get_field(s->fields, r1);
1678 bool is_imm = have_field(s->fields, i2);
1679 int imm = is_imm ? get_field(s->fields, i2) : 0;
1680 DisasCompare c;
1681
1682 c.cond = TCG_COND_NE;
1683 c.is_64 = true;
1684 c.g1 = true;
1685 c.g2 = false;
1686
1687 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1688 c.u.s64.a = regs[r1];
1689 c.u.s64.b = tcg_const_i64(0);
1690
1691 return help_branch(s, &c, is_imm, imm, o->in2);
1692 }
1693
1694 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1695 {
1696 int r1 = get_field(s->fields, r1);
1697 int r3 = get_field(s->fields, r3);
1698 bool is_imm = have_field(s->fields, i2);
1699 int imm = is_imm ? get_field(s->fields, i2) : 0;
1700 DisasCompare c;
1701 TCGv_i64 t;
1702
1703 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1704 c.is_64 = false;
1705 c.g1 = false;
1706 c.g2 = false;
1707
1708 t = tcg_temp_new_i64();
1709 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1710 c.u.s32.a = tcg_temp_new_i32();
1711 c.u.s32.b = tcg_temp_new_i32();
1712 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1713 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1714 store_reg32_i64(r1, t);
1715 tcg_temp_free_i64(t);
1716
1717 return help_branch(s, &c, is_imm, imm, o->in2);
1718 }
1719
1720 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1721 {
1722 int r1 = get_field(s->fields, r1);
1723 int r3 = get_field(s->fields, r3);
1724 bool is_imm = have_field(s->fields, i2);
1725 int imm = is_imm ? get_field(s->fields, i2) : 0;
1726 DisasCompare c;
1727
1728 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1729 c.is_64 = true;
1730
1731 if (r1 == (r3 | 1)) {
1732 c.u.s64.b = load_reg(r3 | 1);
1733 c.g2 = false;
1734 } else {
1735 c.u.s64.b = regs[r3 | 1];
1736 c.g2 = true;
1737 }
1738
1739 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1740 c.u.s64.a = regs[r1];
1741 c.g1 = true;
1742
1743 return help_branch(s, &c, is_imm, imm, o->in2);
1744 }
1745
1746 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1747 {
1748 int imm, m3 = get_field(s->fields, m3);
1749 bool is_imm;
1750 DisasCompare c;
1751
1752 c.cond = ltgt_cond[m3];
1753 if (s->insn->data) {
1754 c.cond = tcg_unsigned_cond(c.cond);
1755 }
1756 c.is_64 = c.g1 = c.g2 = true;
1757 c.u.s64.a = o->in1;
1758 c.u.s64.b = o->in2;
1759
1760 is_imm = have_field(s->fields, i4);
1761 if (is_imm) {
1762 imm = get_field(s->fields, i4);
1763 } else {
1764 imm = 0;
1765 o->out = get_address(s, 0, get_field(s->fields, b4),
1766 get_field(s->fields, d4));
1767 }
1768
1769 return help_branch(s, &c, is_imm, imm, o->out);
1770 }
1771
1772 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1773 {
1774 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1775 set_cc_static(s);
1776 return DISAS_NEXT;
1777 }
1778
1779 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1780 {
1781 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1782 set_cc_static(s);
1783 return DISAS_NEXT;
1784 }
1785
1786 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1787 {
1788 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1789 set_cc_static(s);
1790 return DISAS_NEXT;
1791 }
1792
1793 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1794 bool m4_with_fpe)
1795 {
1796 const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1797 uint8_t m3 = get_field(s->fields, m3);
1798 uint8_t m4 = get_field(s->fields, m4);
1799
1800 /* m3 field was introduced with FPE */
1801 if (!fpe && m3_with_fpe) {
1802 m3 = 0;
1803 }
1804 /* m4 field was introduced with FPE */
1805 if (!fpe && m4_with_fpe) {
1806 m4 = 0;
1807 }
1808
1809 /* Check for valid rounding modes. Mode 3 was introduced later. */
1810 if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1811 gen_program_exception(s, PGM_SPECIFICATION);
1812 return NULL;
1813 }
1814
1815 return tcg_const_i32(deposit32(m3, 4, 4, m4));
1816 }
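/*
 * Packing sketch: both rounding-mode fields travel to the helper in one
 * i32.  deposit32(m3, 4, 4, m4) places m4 in bits 4-7 above m3 in bits
 * 0-3, so e.g. m3 = 5 and m4 = 1 yield 0x15, which the helper unpacks
 * again on the C side.
 */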
1817
1818 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1819 {
1820 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1821
1822 if (!m34) {
1823 return DISAS_NORETURN;
1824 }
1825 gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1826 tcg_temp_free_i32(m34);
1827 gen_set_cc_nz_f32(s, o->in2);
1828 return DISAS_NEXT;
1829 }
1830
1831 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1832 {
1833 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1834
1835 if (!m34) {
1836 return DISAS_NORETURN;
1837 }
1838 gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1839 tcg_temp_free_i32(m34);
1840 gen_set_cc_nz_f64(s, o->in2);
1841 return DISAS_NEXT;
1842 }
1843
1844 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1845 {
1846 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1847
1848 if (!m34) {
1849 return DISAS_NORETURN;
1850 }
1851 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
1852 tcg_temp_free_i32(m34);
1853 gen_set_cc_nz_f128(s, o->in1, o->in2);
1854 return DISAS_NEXT;
1855 }
1856
1857 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1858 {
1859 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1860
1861 if (!m34) {
1862 return DISAS_NORETURN;
1863 }
1864 gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1865 tcg_temp_free_i32(m34);
1866 gen_set_cc_nz_f32(s, o->in2);
1867 return DISAS_NEXT;
1868 }
1869
1870 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1871 {
1872 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1873
1874 if (!m34) {
1875 return DISAS_NORETURN;
1876 }
1877 gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1878 tcg_temp_free_i32(m34);
1879 gen_set_cc_nz_f64(s, o->in2);
1880 return DISAS_NEXT;
1881 }
1882
1883 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1884 {
1885 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1886
1887 if (!m34) {
1888 return DISAS_NORETURN;
1889 }
1890 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
1891 tcg_temp_free_i32(m34);
1892 gen_set_cc_nz_f128(s, o->in1, o->in2);
1893 return DISAS_NEXT;
1894 }
1895
1896 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1897 {
1898 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1899
1900 if (!m34) {
1901 return DISAS_NORETURN;
1902 }
1903 gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1904 tcg_temp_free_i32(m34);
1905 gen_set_cc_nz_f32(s, o->in2);
1906 return DISAS_NEXT;
1907 }
1908
1909 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1910 {
1911 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1912
1913 if (!m34) {
1914 return DISAS_NORETURN;
1915 }
1916 gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1917 tcg_temp_free_i32(m34);
1918 gen_set_cc_nz_f64(s, o->in2);
1919 return DISAS_NEXT;
1920 }
1921
1922 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1923 {
1924 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1925
1926 if (!m34) {
1927 return DISAS_NORETURN;
1928 }
1929 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
1930 tcg_temp_free_i32(m34);
1931 gen_set_cc_nz_f128(s, o->in1, o->in2);
1932 return DISAS_NEXT;
1933 }
1934
1935 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1936 {
1937 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1938
1939 if (!m34) {
1940 return DISAS_NORETURN;
1941 }
1942 gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1943 tcg_temp_free_i32(m34);
1944 gen_set_cc_nz_f32(s, o->in2);
1945 return DISAS_NEXT;
1946 }
1947
1948 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1949 {
1950 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1951
1952 if (!m34) {
1953 return DISAS_NORETURN;
1954 }
1955 gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1956 tcg_temp_free_i32(m34);
1957 gen_set_cc_nz_f64(s, o->in2);
1958 return DISAS_NEXT;
1959 }
1960
1961 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1962 {
1963 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1964
1965 if (!m34) {
1966 return DISAS_NORETURN;
1967 }
1968 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
1969 tcg_temp_free_i32(m34);
1970 gen_set_cc_nz_f128(s, o->in1, o->in2);
1971 return DISAS_NEXT;
1972 }
1973
1974 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1975 {
1976 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1977
1978 if (!m34) {
1979 return DISAS_NORETURN;
1980 }
1981 gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1982 tcg_temp_free_i32(m34);
1983 return DISAS_NEXT;
1984 }
1985
1986 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1987 {
1988 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1989
1990 if (!m34) {
1991 return DISAS_NORETURN;
1992 }
1993 gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1994 tcg_temp_free_i32(m34);
1995 return DISAS_NEXT;
1996 }
1997
1998 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1999 {
2000 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2001
2002 if (!m34) {
2003 return DISAS_NORETURN;
2004 }
2005 gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
2006 tcg_temp_free_i32(m34);
2007 return_low128(o->out2);
2008 return DISAS_NEXT;
2009 }
2010
2011 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
2012 {
2013 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2014
2015 if (!m34) {
2016 return DISAS_NORETURN;
2017 }
2018 gen_helper_celgb(o->out, cpu_env, o->in2, m34);
2019 tcg_temp_free_i32(m34);
2020 return DISAS_NEXT;
2021 }
2022
2023 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
2024 {
2025 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2026
2027 if (!m34) {
2028 return DISAS_NORETURN;
2029 }
2030 gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
2031 tcg_temp_free_i32(m34);
2032 return DISAS_NEXT;
2033 }
2034
2035 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
2036 {
2037 TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2038
2039 if (!m34) {
2040 return DISAS_NORETURN;
2041 }
2042 gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
2043 tcg_temp_free_i32(m34);
2044 return_low128(o->out2);
2045 return DISAS_NEXT;
2046 }
2047
2048 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2049 {
2050 int r2 = get_field(s->fields, r2);
2051 TCGv_i64 len = tcg_temp_new_i64();
2052
2053 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2054 set_cc_static(s);
2055 return_low128(o->out);
2056
2057 tcg_gen_add_i64(regs[r2], regs[r2], len);
2058 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2059 tcg_temp_free_i64(len);
2060
2061 return DISAS_NEXT;
2062 }
2063
2064 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2065 {
2066 int l = get_field(s->fields, l1);
2067 TCGv_i32 vl;
2068
2069 switch (l + 1) {
2070 case 1:
2071 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2072 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2073 break;
2074 case 2:
2075 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2076 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2077 break;
2078 case 4:
2079 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2080 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2081 break;
2082 case 8:
2083 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2084 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2085 break;
2086 default:
2087 vl = tcg_const_i32(l);
2088 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2089 tcg_temp_free_i32(vl);
2090 set_cc_static(s);
2091 return DISAS_NEXT;
2092 }
2093 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2094 return DISAS_NEXT;
2095 }
2096
2097 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2098 {
2099 int r1 = get_field(s->fields, r1);
2100 int r2 = get_field(s->fields, r2);
2101 TCGv_i32 t1, t2;
2102
2103 /* r1 and r2 must be even. */
2104 if (r1 & 1 || r2 & 1) {
2105 gen_program_exception(s, PGM_SPECIFICATION);
2106 return DISAS_NORETURN;
2107 }
2108
2109 t1 = tcg_const_i32(r1);
2110 t2 = tcg_const_i32(r2);
2111 gen_helper_clcl(cc_op, cpu_env, t1, t2);
2112 tcg_temp_free_i32(t1);
2113 tcg_temp_free_i32(t2);
2114 set_cc_static(s);
2115 return DISAS_NEXT;
2116 }
2117
2118 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2119 {
2120 int r1 = get_field(s->fields, r1);
2121 int r3 = get_field(s->fields, r3);
2122 TCGv_i32 t1, t3;
2123
2124 /* r1 and r3 must be even. */
2125 if (r1 & 1 || r3 & 1) {
2126 gen_program_exception(s, PGM_SPECIFICATION);
2127 return DISAS_NORETURN;
2128 }
2129
2130 t1 = tcg_const_i32(r1);
2131 t3 = tcg_const_i32(r3);
2132 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2133 tcg_temp_free_i32(t1);
2134 tcg_temp_free_i32(t3);
2135 set_cc_static(s);
2136 return DISAS_NEXT;
2137 }
2138
2139 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2140 {
2141 int r1 = get_field(s->fields, r1);
2142 int r3 = get_field(s->fields, r3);
2143 TCGv_i32 t1, t3;
2144
2145 /* r1 and r3 must be even. */
2146 if (r1 & 1 || r3 & 1) {
2147 gen_program_exception(s, PGM_SPECIFICATION);
2148 return DISAS_NORETURN;
2149 }
2150
2151 t1 = tcg_const_i32(r1);
2152 t3 = tcg_const_i32(r3);
2153 gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2154 tcg_temp_free_i32(t1);
2155 tcg_temp_free_i32(t3);
2156 set_cc_static(s);
2157 return DISAS_NEXT;
2158 }
2159
2160 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2161 {
2162 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2163 TCGv_i32 t1 = tcg_temp_new_i32();
2164 tcg_gen_extrl_i64_i32(t1, o->in1);
2165 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2166 set_cc_static(s);
2167 tcg_temp_free_i32(t1);
2168 tcg_temp_free_i32(m3);
2169 return DISAS_NEXT;
2170 }
2171
2172 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2173 {
2174 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2175 set_cc_static(s);
2176 return_low128(o->in2);
2177 return DISAS_NEXT;
2178 }
2179
2180 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2181 {
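/* COPY SIGN: the result is in2 with its sign bit replaced by the
   sign bit of in1. */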
2182 TCGv_i64 t = tcg_temp_new_i64();
2183 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2184 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2185 tcg_gen_or_i64(o->out, o->out, t);
2186 tcg_temp_free_i64(t);
2187 return DISAS_NEXT;
2188 }
2189
2190 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2191 {
2192 int d2 = get_field(s->fields, d2);
2193 int b2 = get_field(s->fields, b2);
2194 TCGv_i64 addr, cc;
2195
2196 /* Note that in1 = R3 (new value) and
2197 in2 = (zero-extended) R1 (expected value). */
2198
2199 addr = get_address(s, 0, b2, d2);
2200 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2201 get_mem_index(s), s->insn->data | MO_ALIGN);
2202 tcg_temp_free_i64(addr);
2203
2204 /* Are the memory and expected values (un)equal? Note that this setcond
2205 produces the output CC value, thus the NE sense of the test. */
2206 cc = tcg_temp_new_i64();
2207 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2208 tcg_gen_extrl_i64_i32(cc_op, cc);
2209 tcg_temp_free_i64(cc);
2210 set_cc_static(s);
2211
2212 return DISAS_NEXT;
2213 }
2214
2215 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2216 {
2217 int r1 = get_field(s->fields, r1);
2218 int r3 = get_field(s->fields, r3);
2219 int d2 = get_field(s->fields, d2);
2220 int b2 = get_field(s->fields, b2);
2221 DisasJumpType ret = DISAS_NEXT;
2222 TCGv_i64 addr;
2223 TCGv_i32 t_r1, t_r3;
2224
2225 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2226 addr = get_address(s, 0, b2, d2);
2227 t_r1 = tcg_const_i32(r1);
2228 t_r3 = tcg_const_i32(r3);
2229 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2230 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2231 } else if (HAVE_CMPXCHG128) {
2232 gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2233 } else {
2234 gen_helper_exit_atomic(cpu_env);
2235 ret = DISAS_NORETURN;
2236 }
2237 tcg_temp_free_i64(addr);
2238 tcg_temp_free_i32(t_r1);
2239 tcg_temp_free_i32(t_r3);
2240
2241 set_cc_static(s);
2242 return ret;
2243 }
2244
2245 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2246 {
2247 int r3 = get_field(s->fields, r3);
2248 TCGv_i32 t_r3 = tcg_const_i32(r3);
2249
2250 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2251 gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2252 } else {
2253 gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2254 }
2255 tcg_temp_free_i32(t_r3);
2256
2257 set_cc_static(s);
2258 return DISAS_NEXT;
2259 }
2260
2261 #ifndef CONFIG_USER_ONLY
2262 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2263 {
2264 TCGMemOp mop = s->insn->data;
2265 TCGv_i64 addr, old, cc;
2266 TCGLabel *lab = gen_new_label();
2267
2268 /* Note that in1 = R1 (zero-extended expected value),
2269 out = R1 (original reg), out2 = R1+1 (new value). */
2270
2271 addr = tcg_temp_new_i64();
2272 old = tcg_temp_new_i64();
2273 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2274 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2275 get_mem_index(s), mop | MO_ALIGN);
2276 tcg_temp_free_i64(addr);
2277
2278 /* Are the memory and expected values (un)equal? */
2279 cc = tcg_temp_new_i64();
2280 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2281 tcg_gen_extrl_i64_i32(cc_op, cc);
2282
2283 /* Write back the output now, so that it happens before the
2284 following branch, so that we don't need local temps. */
2285 if ((mop & MO_SIZE) == MO_32) {
2286 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2287 } else {
2288 tcg_gen_mov_i64(o->out, old);
2289 }
2290 tcg_temp_free_i64(old);
2291
2292 /* If the comparison was equal, and the LSB of R2 was set,
2293 then we need to flush the TLB (for all cpus). */
2294 tcg_gen_xori_i64(cc, cc, 1);
2295 tcg_gen_and_i64(cc, cc, o->in2);
2296 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2297 tcg_temp_free_i64(cc);
2298
2299 gen_helper_purge(cpu_env);
2300 gen_set_label(lab);
2301
2302 return DISAS_NEXT;
2303 }
2304 #endif
2305
2306 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2307 {
2308 TCGv_i64 t1 = tcg_temp_new_i64();
2309 TCGv_i32 t2 = tcg_temp_new_i32();
2310 tcg_gen_extrl_i64_i32(t2, o->in1);
2311 gen_helper_cvd(t1, t2);
2312 tcg_temp_free_i32(t2);
2313 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2314 tcg_temp_free_i64(t1);
2315 return DISAS_NEXT;
2316 }
2317
2318 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2319 {
2320 int m3 = get_field(s->fields, m3);
2321 TCGLabel *lab = gen_new_label();
2322 TCGCond c;
2323
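/* COMPARE AND TRAP: branch around the trap when the *inverted*
   condition holds, i.e. trap exactly when the m3 condition is met;
   insn->data selects the unsigned variant of the comparison. */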
2324 c = tcg_invert_cond(ltgt_cond[m3]);
2325 if (s->insn->data) {
2326 c = tcg_unsigned_cond(c);
2327 }
2328 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2329
2330 /* Trap. */
2331 gen_trap(s);
2332
2333 gen_set_label(lab);
2334 return DISAS_NEXT;
2335 }
2336
2337 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2338 {
2339 int m3 = get_field(s->fields, m3);
2340 int r1 = get_field(s->fields, r1);
2341 int r2 = get_field(s->fields, r2);
2342 TCGv_i32 tr1, tr2, chk;
2343
2344 /* R1 and R2 must both be even. */
2345 if ((r1 | r2) & 1) {
2346 gen_program_exception(s, PGM_SPECIFICATION);
2347 return DISAS_NORETURN;
2348 }
2349 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2350 m3 = 0;
2351 }
2352
2353 tr1 = tcg_const_i32(r1);
2354 tr2 = tcg_const_i32(r2);
2355 chk = tcg_const_i32(m3);
2356
2357 switch (s->insn->data) {
2358 case 12:
2359 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2360 break;
2361 case 14:
2362 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2363 break;
2364 case 21:
2365 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2366 break;
2367 case 24:
2368 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2369 break;
2370 case 41:
2371 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2372 break;
2373 case 42:
2374 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2375 break;
2376 default:
2377 g_assert_not_reached();
2378 }
2379
2380 tcg_temp_free_i32(tr1);
2381 tcg_temp_free_i32(tr2);
2382 tcg_temp_free_i32(chk);
2383 set_cc_static(s);
2384 return DISAS_NEXT;
2385 }
2386
2387 #ifndef CONFIG_USER_ONLY
2388 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2389 {
2390 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2391 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2392 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2393
2394 gen_helper_diag(cpu_env, r1, r3, func_code);
2395
2396 tcg_temp_free_i32(func_code);
2397 tcg_temp_free_i32(r3);
2398 tcg_temp_free_i32(r1);
2399 return DISAS_NEXT;
2400 }
2401 #endif
2402
2403 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2404 {
2405 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2406 return_low128(o->out);
2407 return DISAS_NEXT;
2408 }
2409
2410 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2411 {
2412 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2413 return_low128(o->out);
2414 return DISAS_NEXT;
2415 }
2416
2417 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2418 {
2419 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2420 return_low128(o->out);
2421 return DISAS_NEXT;
2422 }
2423
2424 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2425 {
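/* The 128-bit dividend is passed to the helper as two 64-bit
   halves, out (high) and out2 (low); the two result halves come
   back split between the helper's return value and the low-128
   mechanism. */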
2426 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2427 return_low128(o->out);
2428 return DISAS_NEXT;
2429 }
2430
2431 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2432 {
2433 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2434 return DISAS_NEXT;
2435 }
2436
2437 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2438 {
2439 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2440 return DISAS_NEXT;
2441 }
2442
2443 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2444 {
2445 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2446 return_low128(o->out2);
2447 return DISAS_NEXT;
2448 }
2449
2450 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2451 {
2452 int r2 = get_field(s->fields, r2);
2453 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2454 return DISAS_NEXT;
2455 }
2456
2457 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2458 {
2459 /* No cache information provided. */
2460 tcg_gen_movi_i64(o->out, -1);
2461 return DISAS_NEXT;
2462 }
2463
2464 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2465 {
2466 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2467 return DISAS_NEXT;
2468 }
2469
2470 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2471 {
2472 int r1 = get_field(s->fields, r1);
2473 int r2 = get_field(s->fields, r2);
2474 TCGv_i64 t = tcg_temp_new_i64();
2475
2476 /* Note the "subsequently" in the PoO, which implies a defined result
2477 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2478 tcg_gen_shri_i64(t, psw_mask, 32);
2479 store_reg32_i64(r1, t);
2480 if (r2 != 0) {
2481 store_reg32_i64(r2, psw_mask);
2482 }
2483
2484 tcg_temp_free_i64(t);
2485 return DISAS_NEXT;
2486 }
2487
2488 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2489 {
2490 int r1 = get_field(s->fields, r1);
2491 TCGv_i32 ilen;
2492 TCGv_i64 v1;
2493
2494 /* Nested EXECUTE is not allowed. */
2495 if (unlikely(s->ex_value)) {
2496 gen_program_exception(s, PGM_EXECUTE);
2497 return DISAS_NORETURN;
2498 }
2499
2500 update_psw_addr(s);
2501 update_cc_op(s);
2502
2503 if (r1 == 0) {
2504 v1 = tcg_const_i64(0);
2505 } else {
2506 v1 = regs[r1];
2507 }
2508
2509 ilen = tcg_const_i32(s->ilen);
2510 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2511 tcg_temp_free_i32(ilen);
2512
2513 if (r1 == 0) {
2514 tcg_temp_free_i64(v1);
2515 }
2516
2517 return DISAS_PC_CC_UPDATED;
2518 }
2519
2520 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2521 {
2522 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2523
2524 if (!m34) {
2525 return DISAS_NORETURN;
2526 }
2527 gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2528 tcg_temp_free_i32(m34);
2529 return DISAS_NEXT;
2530 }
2531
2532 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2533 {
2534 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2535
2536 if (!m34) {
2537 return DISAS_NORETURN;
2538 }
2539 gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2540 tcg_temp_free_i32(m34);
2541 return DISAS_NEXT;
2542 }
2543
2544 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2545 {
2546 TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2547
2548 if (!m34) {
2549 return DISAS_NORETURN;
2550 }
2551 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2552 return_low128(o->out2);
2553 tcg_temp_free_i32(m34);
2554 return DISAS_NEXT;
2555 }
2556
2557 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2558 {
2559 /* We'll use the original input for cc computation, since we get to
2560 compare that against 0, which ought to be better than comparing
2561 the real output against 64. It also lets cc_dst be a convenient
2562 temporary during our computation. */
2563 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2564
2565 /* R1 = IN ? CLZ(IN) : 64. */
2566 tcg_gen_clzi_i64(o->out, o->in2, 64);
2567
2568 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2569 value by 64, which is undefined. But since the shift is 64 iff the
2570 input is zero, we still get the correct result after and'ing. */
2571 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2572 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2573 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2574 return DISAS_NEXT;
2575 }
2576
2577 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2578 {
2579 int m3 = get_field(s->fields, m3);
2580 int pos, len, base = s->insn->data;
2581 TCGv_i64 tmp = tcg_temp_new_i64();
2582 uint64_t ccm;
2583
2584 switch (m3) {
2585 case 0xf:
2586 /* Effectively a 32-bit load. */
2587 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2588 len = 32;
2589 goto one_insert;
2590
2591 case 0xc:
2592 case 0x6:
2593 case 0x3:
2594 /* Effectively a 16-bit load. */
2595 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2596 len = 16;
2597 goto one_insert;
2598
2599 case 0x8:
2600 case 0x4:
2601 case 0x2:
2602 case 0x1:
2603 /* Effectively an 8-bit load. */
2604 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2605 len = 8;
2606 goto one_insert;
2607
2608 one_insert:
2609 pos = base + ctz32(m3) * 8;
2610 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2611 ccm = ((1ull << len) - 1) << pos;
2612 break;
2613
2614 default:
2615 /* This is going to be a sequence of loads and inserts. */
2616 pos = base + 32 - 8;
2617 ccm = 0;
2618 while (m3) {
2619 if (m3 & 0x8) {
2620 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2621 tcg_gen_addi_i64(o->in2, o->in2, 1);
2622 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2623 ccm |= 0xffull << pos;
2624 }
2625 m3 = (m3 << 1) & 0xf;
2626 pos -= 8;
2627 }
2628 break;
2629 }
2630
2631 tcg_gen_movi_i64(tmp, ccm);
2632 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2633 tcg_temp_free_i64(tmp);
2634 return DISAS_NEXT;
2635 }
2636
2637 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2638 {
2639 int shift = s->insn->data & 0xff;
2640 int size = s->insn->data >> 8;
2641 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2642 return DISAS_NEXT;
2643 }
2644
2645 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2646 {
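/* INSERT PROGRAM MASK: assemble the byte (cc << 4) | program-mask
   from the computed CC and the program-mask bits of the PSW, then
   deposit it into bits 32-39 of R1 (little-endian bit offset 24). */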
2647 TCGv_i64 t1, t2;
2648
2649 gen_op_calc_cc(s);
2650 t1 = tcg_temp_new_i64();
2651 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2652 t2 = tcg_temp_new_i64();
2653 tcg_gen_extu_i32_i64(t2, cc_op);
2654 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2655 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2656 tcg_temp_free_i64(t1);
2657 tcg_temp_free_i64(t2);
2658 return DISAS_NEXT;
2659 }
2660
2661 #ifndef CONFIG_USER_ONLY
2662 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2663 {
2664 TCGv_i32 m4;
2665
2666 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2667 m4 = tcg_const_i32(get_field(s->fields, m4));
2668 } else {
2669 m4 = tcg_const_i32(0);
2670 }
2671 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2672 tcg_temp_free_i32(m4);
2673 return DISAS_NEXT;
2674 }
2675
2676 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2677 {
2678 TCGv_i32 m4;
2679
2680 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2681 m4 = tcg_const_i32(get_field(s->fields, m4));
2682 } else {
2683 m4 = tcg_const_i32(0);
2684 }
2685 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2686 tcg_temp_free_i32(m4);
2687 return DISAS_NEXT;
2688 }
2689
2690 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2691 {
2692 gen_helper_iske(o->out, cpu_env, o->in2);
2693 return DISAS_NEXT;
2694 }
2695 #endif
2696
2697 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2698 {
2699 int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2700 int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2701 int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2702 TCGv_i32 t_r1, t_r2, t_r3, type;
2703
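/* Check the register operands required by the individual function
   type; each case deliberately falls through to the weaker checks
   shared with the types below it. */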
2704 switch (s->insn->data) {
2705 case S390_FEAT_TYPE_KMCTR:
2706 if (r3 & 1 || !r3) {
2707 gen_program_exception(s, PGM_SPECIFICATION);
2708 return DISAS_NORETURN;
2709 }
2710 /* FALL THROUGH */
2711 case S390_FEAT_TYPE_PPNO:
2712 case S390_FEAT_TYPE_KMF:
2713 case S390_FEAT_TYPE_KMC:
2714 case S390_FEAT_TYPE_KMO:
2715 case S390_FEAT_TYPE_KM:
2716 if (r1 & 1 || !r1) {
2717 gen_program_exception(s, PGM_SPECIFICATION);
2718 return DISAS_NORETURN;
2719 }
2720 /* FALL THROUGH */
2721 case S390_FEAT_TYPE_KMAC:
2722 case S390_FEAT_TYPE_KIMD:
2723 case S390_FEAT_TYPE_KLMD:
2724 if (r2 & 1 || !r2) {
2725 gen_program_exception(s, PGM_SPECIFICATION);
2726 return DISAS_NORETURN;
2727 }
2728 /* FALL THROUGH */
2729 case S390_FEAT_TYPE_PCKMO:
2730 case S390_FEAT_TYPE_PCC:
2731 break;
2732 default:
2733 g_assert_not_reached();
2734 }
2735
2736 t_r1 = tcg_const_i32(r1);
2737 t_r2 = tcg_const_i32(r2);
2738 t_r3 = tcg_const_i32(r3);
2739 type = tcg_const_i32(s->insn->data);
2740 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2741 set_cc_static(s);
2742 tcg_temp_free_i32(t_r1);
2743 tcg_temp_free_i32(t_r2);
2744 tcg_temp_free_i32(t_r3);
2745 tcg_temp_free_i32(type);
2746 return DISAS_NEXT;
2747 }
2748
2749 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2750 {
2751 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2752 set_cc_static(s);
2753 return DISAS_NEXT;
2754 }
2755
2756 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2757 {
2758 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2759 set_cc_static(s);
2760 return DISAS_NEXT;
2761 }
2762
2763 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2764 {
2765 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2766 set_cc_static(s);
2767 return DISAS_NEXT;
2768 }
2769
2770 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2771 {
2772 /* The real output is indeed the original value in memory. */
2774 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2775 s->insn->data | MO_ALIGN);
2776 /* However, we need to recompute the addition for setting CC. */
2777 tcg_gen_add_i64(o->out, o->in1, o->in2);
2778 return DISAS_NEXT;
2779 }
2780
2781 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2782 {
2783 /* The real output is indeed the original value in memory. */
2785 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2786 s->insn->data | MO_ALIGN);
2787 /* However, we need to recompute the operation for setting CC. */
2788 tcg_gen_and_i64(o->out, o->in1, o->in2);
2789 return DISAS_NEXT;
2790 }
2791
2792 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2793 {
2794 /* The real output is indeed the original value in memory. */
2796 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2797 s->insn->data | MO_ALIGN);
2798 /* However, we need to recompute the operation for setting CC. */
2799 tcg_gen_or_i64(o->out, o->in1, o->in2);
2800 return DISAS_NEXT;
2801 }
2802
2803 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2804 {
2805 /* The real output is indeed the original value in memory. */
2807 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2808 s->insn->data | MO_ALIGN);
2809 /* However, we need to recompute the operation for setting CC. */
2810 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2811 return DISAS_NEXT;
2812 }
2813
2814 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2815 {
2816 gen_helper_ldeb(o->out, cpu_env, o->in2);
2817 return DISAS_NEXT;
2818 }
2819
2820 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2821 {
2822 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2823
2824 if (!m34) {
2825 return DISAS_NORETURN;
2826 }
2827 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2828 tcg_temp_free_i32(m34);
2829 return DISAS_NEXT;
2830 }
2831
2832 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2833 {
2834 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2835
2836 if (!m34) {
2837 return DISAS_NORETURN;
2838 }
2839 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2840 tcg_temp_free_i32(m34);
2841 return DISAS_NEXT;
2842 }
2843
2844 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2845 {
2846 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2847
2848 if (!m34) {
2849 return DISAS_NORETURN;
2850 }
2851 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2852 tcg_temp_free_i32(m34);
2853 return DISAS_NEXT;
2854 }
2855
2856 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2857 {
2858 gen_helper_lxdb(o->out, cpu_env, o->in2);
2859 return_low128(o->out2);
2860 return DISAS_NEXT;
2861 }
2862
2863 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2864 {
2865 gen_helper_lxeb(o->out, cpu_env, o->in2);
2866 return_low128(o->out2);
2867 return DISAS_NEXT;
2868 }
2869
2870 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2871 {
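/* A short (32-bit) operand lives in the leftmost bits of an FPR;
   shift it into place. */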
2872 tcg_gen_shli_i64(o->out, o->in2, 32);
2873 return DISAS_NEXT;
2874 }
2875
2876 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2877 {
2878 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2879 return DISAS_NEXT;
2880 }
2881
2882 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2883 {
2884 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2885 return DISAS_NEXT;
2886 }
2887
2888 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2889 {
2890 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2891 return DISAS_NEXT;
2892 }
2893
2894 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2895 {
2896 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2897 return DISAS_NEXT;
2898 }
2899
2900 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2901 {
2902 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2903 return DISAS_NEXT;
2904 }
2905
2906 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2907 {
2908 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2909 return DISAS_NEXT;
2910 }
2911
2912 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2913 {
2914 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2915 return DISAS_NEXT;
2916 }
2917
2918 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2919 {
2920 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2921 return DISAS_NEXT;
2922 }
2923
2924 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2925 {
2926 TCGLabel *lab = gen_new_label();
2927 store_reg32_i64(get_field(s->fields, r1), o->in2);
2928 /* The value is stored even in case of trap. */
2929 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2930 gen_trap(s);
2931 gen_set_label(lab);
2932 return DISAS_NEXT;
2933 }
2934
2935 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2936 {
2937 TCGLabel *lab = gen_new_label();
2938 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2939 /* The value is stored even in case of trap. */
2940 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2941 gen_trap(s);
2942 gen_set_label(lab);
2943 return DISAS_NEXT;
2944 }
2945
2946 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2947 {
2948 TCGLabel *lab = gen_new_label();
2949 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2950 /* The value is stored even in case of trap. */
2951 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2952 gen_trap(s);
2953 gen_set_label(lab);
2954 return DISAS_NEXT;
2955 }
2956
2957 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2958 {
2959 TCGLabel *lab = gen_new_label();
2960 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2961 /* The value is stored even in case of trap. */
2962 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2963 gen_trap(s);
2964 gen_set_label(lab);
2965 return DISAS_NEXT;
2966 }
2967
2968 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2969 {
2970 TCGLabel *lab = gen_new_label();
2971 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2972 /* The value is stored even in case of trap. */
2973 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2974 gen_trap(s);
2975 gen_set_label(lab);
2976 return DISAS_NEXT;
2977 }
2978
2979 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2980 {
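/* LOAD ON CONDITION: out = cond(m3) ? in2 : in1, with in1 expected
   to carry the previous value of the destination. */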
2981 DisasCompare c;
2982
2983 disas_jcc(s, &c, get_field(s->fields, m3));
2984
2985 if (c.is_64) {
2986 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2987 o->in2, o->in1);
2988 free_compare(&c);
2989 } else {
2990 TCGv_i32 t32 = tcg_temp_new_i32();
2991 TCGv_i64 t, z;
2992
2993 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2994 free_compare(&c);
2995
2996 t = tcg_temp_new_i64();
2997 tcg_gen_extu_i32_i64(t, t32);
2998 tcg_temp_free_i32(t32);
2999
3000 z = tcg_const_i64(0);
3001 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3002 tcg_temp_free_i64(t);
3003 tcg_temp_free_i64(z);
3004 }
3005
3006 return DISAS_NEXT;
3007 }
3008
3009 #ifndef CONFIG_USER_ONLY
3010 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3011 {
3012 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3013 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3014 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3015 tcg_temp_free_i32(r1);
3016 tcg_temp_free_i32(r3);
3017 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3018 return DISAS_PC_STALE_NOCHAIN;
3019 }
3020
3021 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3022 {
3023 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3024 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3025 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3026 tcg_temp_free_i32(r1);
3027 tcg_temp_free_i32(r3);
3028 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3029 return DISAS_PC_STALE_NOCHAIN;
3030 }
3031
3032 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3033 {
3034 gen_helper_lra(o->out, cpu_env, o->in2);
3035 set_cc_static(s);
3036 return DISAS_NEXT;
3037 }
3038
3039 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3040 {
3041 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3042 return DISAS_NEXT;
3043 }
3044
3045 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3046 {
3047 TCGv_i64 t1, t2;
3048
3049 per_breaking_event(s);
3050
3051 t1 = tcg_temp_new_i64();
3052 t2 = tcg_temp_new_i64();
3053 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3054 MO_TEUL | MO_ALIGN_8);
3055 tcg_gen_addi_i64(o->in2, o->in2, 4);
3056 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3057 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3058 tcg_gen_shli_i64(t1, t1, 32);
3059 gen_helper_load_psw(cpu_env, t1, t2);
3060 tcg_temp_free_i64(t1);
3061 tcg_temp_free_i64(t2);
3062 return DISAS_NORETURN;
3063 }
3064
3065 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3066 {
3067 TCGv_i64 t1, t2;
3068
3069 per_breaking_event(s);
3070
3071 t1 = tcg_temp_new_i64();
3072 t2 = tcg_temp_new_i64();
3073 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3074 MO_TEQ | MO_ALIGN_8);
3075 tcg_gen_addi_i64(o->in2, o->in2, 8);
3076 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3077 gen_helper_load_psw(cpu_env, t1, t2);
3078 tcg_temp_free_i64(t1);
3079 tcg_temp_free_i64(t2);
3080 return DISAS_NORETURN;
3081 }
3082 #endif
3083
3084 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3085 {
3086 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3087 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3088 gen_helper_lam(cpu_env, r1, o->in2, r3);
3089 tcg_temp_free_i32(r1);
3090 tcg_temp_free_i32(r3);
3091 return DISAS_NEXT;
3092 }
3093
3094 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3095 {
3096 int r1 = get_field(s->fields, r1);
3097 int r3 = get_field(s->fields, r3);
3098 TCGv_i64 t1, t2;
3099
3100 /* Only one register to read. */
3101 t1 = tcg_temp_new_i64();
3102 if (unlikely(r1 == r3)) {
3103 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3104 store_reg32_i64(r1, t1);
3105 tcg_temp_free(t1);
3106 return DISAS_NEXT;
3107 }
3108
3109 /* First load the values of the first and last registers to trigger
3110 possible page faults. */
3111 t2 = tcg_temp_new_i64();
3112 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3113 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3114 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3115 store_reg32_i64(r1, t1);
3116 store_reg32_i64(r3, t2);
3117
3118 /* Only two registers to read. */
3119 if (((r1 + 1) & 15) == r3) {
3120 tcg_temp_free(t2);
3121 tcg_temp_free(t1);
3122 return DISAS_NEXT;
3123 }
3124
3125 /* Then load the remaining registers. Page fault can't occur. */
3126 r3 = (r3 - 1) & 15;
3127 tcg_gen_movi_i64(t2, 4);
3128 while (r1 != r3) {
3129 r1 = (r1 + 1) & 15;
3130 tcg_gen_add_i64(o->in2, o->in2, t2);
3131 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3132 store_reg32_i64(r1, t1);
3133 }
3134 tcg_temp_free(t2);
3135 tcg_temp_free(t1);
3136
3137 return DISAS_NEXT;
3138 }
3139
3140 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3141 {
3142 int r1 = get_field(s->fields, r1);
3143 int r3 = get_field(s->fields, r3);
3144 TCGv_i64 t1, t2;
3145
3146 /* Only one register to read. */
3147 t1 = tcg_temp_new_i64();
3148 if (unlikely(r1 == r3)) {
3149 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3150 store_reg32h_i64(r1, t1);
3151 tcg_temp_free(t1);
3152 return DISAS_NEXT;
3153 }
3154
3155 /* First load the values of the first and last registers to trigger
3156 possible page faults. */
3157 t2 = tcg_temp_new_i64();
3158 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3159 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3160 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3161 store_reg32h_i64(r1, t1);
3162 store_reg32h_i64(r3, t2);
3163
3164 /* Only two registers to read. */
3165 if (((r1 + 1) & 15) == r3) {
3166 tcg_temp_free(t2);
3167 tcg_temp_free(t1);
3168 return DISAS_NEXT;
3169 }
3170
3171 /* Then load the remaining registers. Page fault can't occur. */
3172 r3 = (r3 - 1) & 15;
3173 tcg_gen_movi_i64(t2, 4);
3174 while (r1 != r3) {
3175 r1 = (r1 + 1) & 15;
3176 tcg_gen_add_i64(o->in2, o->in2, t2);
3177 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3178 store_reg32h_i64(r1, t1);
3179 }
3180 tcg_temp_free(t2);
3181 tcg_temp_free(t1);
3182
3183 return DISAS_NEXT;
3184 }
3185
3186 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3187 {
3188 int r1 = get_field(s->fields, r1);
3189 int r3 = get_field(s->fields, r3);
3190 TCGv_i64 t1, t2;
3191
3192 /* Only one register to read. */
3193 if (unlikely(r1 == r3)) {
3194 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3195 return DISAS_NEXT;
3196 }
3197
3198 /* First load the values of the first and last registers to trigger
3199 possible page faults. */
3200 t1 = tcg_temp_new_i64();
3201 t2 = tcg_temp_new_i64();
3202 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3203 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3204 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3205 tcg_gen_mov_i64(regs[r1], t1);
3206 tcg_temp_free(t2);
3207
3208 /* Only two registers to read. */
3209 if (((r1 + 1) & 15) == r3) {
3210 tcg_temp_free(t1);
3211 return DISAS_NEXT;
3212 }
3213
3214 /* Then load the remaining registers. Page fault can't occur. */
3215 r3 = (r3 - 1) & 15;
3216 tcg_gen_movi_i64(t1, 8);
3217 while (r1 != r3) {
3218 r1 = (r1 + 1) & 15;
3219 tcg_gen_add_i64(o->in2, o->in2, t1);
3220 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3221 }
3222 tcg_temp_free(t1);
3223
3224 return DISAS_NEXT;
3225 }
3226
3227 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3228 {
3229 TCGv_i64 a1, a2;
3230 TCGMemOp mop = s->insn->data;
3231
3232 /* In a parallel context, stop the world and single step. */
3233 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3234 update_psw_addr(s);
3235 update_cc_op(s);
3236 gen_exception(EXCP_ATOMIC);
3237 return DISAS_NORETURN;
3238 }
3239
3240 /* In a serial context, perform the two loads ... */
3241 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
3242 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3243 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3244 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3245 tcg_temp_free_i64(a1);
3246 tcg_temp_free_i64(a2);
3247
3248 /* ... and indicate that we performed them while interlocked. */
3249 gen_op_movi_cc(s, 0);
3250 return DISAS_NEXT;
3251 }
3252
3253 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3254 {
3255 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3256 gen_helper_lpq(o->out, cpu_env, o->in2);
3257 } else if (HAVE_ATOMIC128) {
3258 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3259 } else {
3260 gen_helper_exit_atomic(cpu_env);
3261 return DISAS_NORETURN;
3262 }
3263 return_low128(o->out2);
3264 return DISAS_NEXT;
3265 }
3266
3267 #ifndef CONFIG_USER_ONLY
3268 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3269 {
3270 gen_helper_lura(o->out, cpu_env, o->in2);
3271 return DISAS_NEXT;
3272 }
3273
3274 static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
3275 {
3276 gen_helper_lurag(o->out, cpu_env, o->in2);
3277 return DISAS_NEXT;
3278 }
3279 #endif
3280
3281 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3282 {
3283 tcg_gen_andi_i64(o->out, o->in2, -256);
3284 return DISAS_NEXT;
3285 }
3286
3287 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3288 {
3289 const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6));
3290
3291 if (get_field(s->fields, m3) > 6) {
3292 gen_program_exception(s, PGM_SPECIFICATION);
3293 return DISAS_NORETURN;
3294 }
3295
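/* -(addr | -block_size) computes block_size - (addr % block_size),
   the distance to the next block boundary, which is then capped
   at 16. */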
3296 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3297 tcg_gen_neg_i64(o->addr1, o->addr1);
3298 tcg_gen_movi_i64(o->out, 16);
3299 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3300 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3301 return DISAS_NEXT;
3302 }
3303
3304 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3305 {
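/* Implement the move by stealing the input temp: hand in2 (and its
   "global" flag) over to out, then clear the input so the common
   cleanup code does not free it twice. */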
3306 o->out = o->in2;
3307 o->g_out = o->g_in2;
3308 o->in2 = NULL;
3309 o->g_in2 = false;
3310 return DISAS_NEXT;
3311 }
3312
3313 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3314 {
3315 int b2 = get_field(s->fields, b2);
3316 TCGv ar1 = tcg_temp_new_i64();
3317
3318 o->out = o->in2;
3319 o->g_out = o->g_in2;
3320 o->in2 = NULL;
3321 o->g_in2 = false;
3322
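/* In addition to the move, set access register 1 according to the
   address-space control currently in effect. */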
3323 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3324 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3325 tcg_gen_movi_i64(ar1, 0);
3326 break;
3327 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3328 tcg_gen_movi_i64(ar1, 1);
3329 break;
3330 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3331 if (b2) {
3332 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3333 } else {
3334 tcg_gen_movi_i64(ar1, 0);
3335 }
3336 break;
3337 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3338 tcg_gen_movi_i64(ar1, 2);
3339 break;
3340 }
3341
3342 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3343 tcg_temp_free_i64(ar1);
3344
3345 return DISAS_NEXT;
3346 }
3347
3348 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3349 {
3350 o->out = o->in1;
3351 o->out2 = o->in2;
3352 o->g_out = o->g_in1;
3353 o->g_out2 = o->g_in2;
3354 o->in1 = NULL;
3355 o->in2 = NULL;
3356 o->g_in1 = o->g_in2 = false;
3357 return DISAS_NEXT;
3358 }
3359
3360 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3361 {
3362 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3363 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3364 tcg_temp_free_i32(l);
3365 return DISAS_NEXT;
3366 }
3367
3368 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3369 {
3370 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3371 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3372 tcg_temp_free_i32(l);
3373 return DISAS_NEXT;
3374 }
3375
3376 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3377 {
3378 int r1 = get_field(s->fields, r1);
3379 int r2 = get_field(s->fields, r2);
3380 TCGv_i32 t1, t2;
3381
3382 /* r1 and r2 must be even. */
3383 if (r1 & 1 || r2 & 1) {
3384 gen_program_exception(s, PGM_SPECIFICATION);
3385 return DISAS_NORETURN;
3386 }
3387
3388 t1 = tcg_const_i32(r1);
3389 t2 = tcg_const_i32(r2);
3390 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3391 tcg_temp_free_i32(t1);
3392 tcg_temp_free_i32(t2);
3393 set_cc_static(s);
3394 return DISAS_NEXT;
3395 }
3396
3397 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3398 {
3399 int r1 = get_field(s->fields, r1);
3400 int r3 = get_field(s->fields, r3);
3401 TCGv_i32 t1, t3;
3402
3403 /* r1 and r3 must be even. */
3404 if (r1 & 1 || r3 & 1) {
3405 gen_program_exception(s, PGM_SPECIFICATION);
3406 return DISAS_NORETURN;
3407 }
3408
3409 t1 = tcg_const_i32(r1);
3410 t3 = tcg_const_i32(r3);
3411 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3412 tcg_temp_free_i32(t1);
3413 tcg_temp_free_i32(t3);
3414 set_cc_static(s);
3415 return DISAS_NEXT;
3416 }
3417
3418 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3419 {
3420 int r1 = get_field(s->fields, r1);
3421 int r3 = get_field(s->fields, r3);
3422 TCGv_i32 t1, t3;
3423
3424 /* r1 and r3 must be even. */
3425 if (r1 & 1 || r3 & 1) {
3426 gen_program_exception(s, PGM_SPECIFICATION);
3427 return DISAS_NORETURN;
3428 }
3429
3430 t1 = tcg_const_i32(r1);
3431 t3 = tcg_const_i32(r3);
3432 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3433 tcg_temp_free_i32(t1);
3434 tcg_temp_free_i32(t3);
3435 set_cc_static(s);
3436 return DISAS_NEXT;
3437 }
3438
3439 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3440 {
3441 int r3 = get_field(s->fields, r3);
3442 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3443 set_cc_static(s);
3444 return DISAS_NEXT;
3445 }
3446
3447 #ifndef CONFIG_USER_ONLY
3448 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3449 {
3450 int r1 = get_field(s->fields, l1);
3451 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3452 set_cc_static(s);
3453 return DISAS_NEXT;
3454 }
3455
3456 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3457 {
3458 int r1 = get_field(s->fields, l1);
3459 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3460 set_cc_static(s);
3461 return DISAS_NEXT;
3462 }
3463 #endif
3464
3465 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3466 {
3467 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3468 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3469 tcg_temp_free_i32(l);
3470 return DISAS_NEXT;
3471 }
3472
3473 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3474 {
3475 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3476 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3477 tcg_temp_free_i32(l);
3478 return DISAS_NEXT;
3479 }
3480
3481 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3482 {
3483 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3484 set_cc_static(s);
3485 return DISAS_NEXT;
3486 }
3487
3488 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3489 {
3490 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3491 set_cc_static(s);
3492 return_low128(o->in2);
3493 return DISAS_NEXT;
3494 }
3495
3496 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3497 {
3498 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3499 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3500 tcg_temp_free_i32(l);
3501 return DISAS_NEXT;
3502 }
3503
3504 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3505 {
3506 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3507 return DISAS_NEXT;
3508 }
3509
3510 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3511 {
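/* 64x64 -> 128 multiply: mulu2 writes the low half of the product
   into out2 and the high half into out. */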
3512 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3513 return DISAS_NEXT;
3514 }
3515
3516 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3517 {
3518 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3519 return DISAS_NEXT;
3520 }
3521
3522 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3523 {
3524 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3525 return DISAS_NEXT;
3526 }
3527
3528 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3529 {
3530 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3531 return DISAS_NEXT;
3532 }
3533
3534 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3535 {
3536 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3537 return_low128(o->out2);
3538 return DISAS_NEXT;
3539 }
3540
3541 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3542 {
3543 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3544 return_low128(o->out2);
3545 return DISAS_NEXT;
3546 }
3547
3548 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3549 {
3550 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3551 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3552 tcg_temp_free_i64(r3);
3553 return DISAS_NEXT;
3554 }
3555
3556 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3557 {
3558 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3559 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3560 tcg_temp_free_i64(r3);
3561 return DISAS_NEXT;
3562 }
3563
3564 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3565 {
3566 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3567 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3568 tcg_temp_free_i64(r3);
3569 return DISAS_NEXT;
3570 }
3571
3572 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3573 {
3574 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3575 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3576 tcg_temp_free_i64(r3);
3577 return DISAS_NEXT;
3578 }
3579
3580 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3581 {
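/* LOAD NEGATIVE: out = -|in2|; the movcond below negates in2 only
   when it is >= 0 and passes it through unchanged otherwise. */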
3582 TCGv_i64 z, n;
3583 z = tcg_const_i64(0);
3584 n = tcg_temp_new_i64();
3585 tcg_gen_neg_i64(n, o->in2);
3586 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3587 tcg_temp_free_i64(n);
3588 tcg_temp_free_i64(z);
3589 return DISAS_NEXT;
3590 }
3591
3592 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3593 {
3594 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3595 return DISAS_NEXT;
3596 }
3597
3598 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3599 {
3600 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3601 return DISAS_NEXT;
3602 }
3603
3604 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3605 {
3606 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3607 tcg_gen_mov_i64(o->out2, o->in2);
3608 return DISAS_NEXT;
3609 }
3610
3611 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3612 {
3613 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3614 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3615 tcg_temp_free_i32(l);
3616 set_cc_static(s);
3617 return DISAS_NEXT;
3618 }
3619
3620 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3621 {
3622 tcg_gen_neg_i64(o->out, o->in2);
3623 return DISAS_NEXT;
3624 }
3625
3626 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3627 {
3628 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3629 return DISAS_NEXT;
3630 }
3631
3632 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3633 {
3634 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3635 return DISAS_NEXT;
3636 }
3637
3638 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3639 {
3640 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3641 tcg_gen_mov_i64(o->out2, o->in2);
3642 return DISAS_NEXT;
3643 }
3644
3645 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3646 {
3647 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3648 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3649 tcg_temp_free_i32(l);
3650 set_cc_static(s);
3651 return DISAS_NEXT;
3652 }
3653
3654 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3655 {
3656 tcg_gen_or_i64(o->out, o->in1, o->in2);
3657 return DISAS_NEXT;
3658 }
3659
3660 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3661 {
3662 int shift = s->insn->data & 0xff;
3663 int size = s->insn->data >> 8;
3664 uint64_t mask = ((1ull << size) - 1) << shift;
3665
3666 assert(!o->g_in2);
3667 tcg_gen_shli_i64(o->in2, o->in2, shift);
3668 tcg_gen_or_i64(o->out, o->in1, o->in2);
3669
3670 /* Produce the CC from only the bits manipulated. */
3671 tcg_gen_andi_i64(cc_dst, o->out, mask);
3672 set_cc_nz_u64(s, cc_dst);
3673 return DISAS_NEXT;
3674 }
3675
3676 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3677 {
3678 o->in1 = tcg_temp_new_i64();
3679
3680 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3681 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3682 } else {
3683 /* Perform the atomic operation in memory. */
3684 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3685 s->insn->data);
3686 }
3687
3688 /* Recompute also for atomic case: needed for setting CC. */
3689 tcg_gen_or_i64(o->out, o->in1, o->in2);
3690
3691 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3692 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3693 }
3694 return DISAS_NEXT;
3695 }
3696
3697 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3698 {
3699 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3700 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3701 tcg_temp_free_i32(l);
3702 return DISAS_NEXT;
3703 }
3704
3705 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3706 {
3707 int l2 = get_field(s->fields, l2) + 1;
3708 TCGv_i32 l;
3709
3710 /* The length must not exceed 32 bytes. */
3711 if (l2 > 32) {
3712 gen_program_exception(s, PGM_SPECIFICATION);
3713 return DISAS_NORETURN;
3714 }
3715 l = tcg_const_i32(l2);
3716 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3717 tcg_temp_free_i32(l);
3718 return DISAS_NEXT;
3719 }
3720
3721 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3722 {
3723 int l2 = get_field(s->fields, l2) + 1;
3724 TCGv_i32 l;
3725
3726 /* The length must be even and must not exceed 64 bytes. */
3727 if ((l2 & 1) || (l2 > 64)) {
3728 gen_program_exception(s, PGM_SPECIFICATION);
3729 return DISAS_NORETURN;
3730 }
3731 l = tcg_const_i32(l2);
3732 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3733 tcg_temp_free_i32(l);
3734 return DISAS_NEXT;
3735 }
3736
3737 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3738 {
3739 gen_helper_popcnt(o->out, o->in2);
3740 return DISAS_NEXT;
3741 }
3742
3743 #ifndef CONFIG_USER_ONLY
3744 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3745 {
3746 gen_helper_ptlb(cpu_env);
3747 return DISAS_NEXT;
3748 }
3749 #endif
3750
3751 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3752 {
3753 int i3 = get_field(s->fields, i3);
3754 int i4 = get_field(s->fields, i4);
3755 int i5 = get_field(s->fields, i5);
3756 int do_zero = i4 & 0x80;
3757 uint64_t mask, imask, pmask;
3758 int pos, len, rot;
3759
3760 /* Adjust the arguments for the specific insn. */
3761 switch (s->fields->op2) {
3762 case 0x55: /* risbg */
3763 case 0x59: /* risbgn */
3764 i3 &= 63;
3765 i4 &= 63;
3766 pmask = ~0;
3767 break;
3768 case 0x5d: /* risbhg */
3769 i3 &= 31;
3770 i4 &= 31;
3771 pmask = 0xffffffff00000000ull;
3772 break;
3773 case 0x51: /* risblg */
3774 i3 &= 31;
3775 i4 &= 31;
3776 pmask = 0x00000000ffffffffull;
3777 break;
3778 default:
3779 g_assert_not_reached();
3780 }
3781
3782 /* MASK is the set of bits to be inserted from R2.
3783 Take care of I3/I4 wraparound. */
3784 mask = pmask >> i3;
3785 if (i3 <= i4) {
3786 mask ^= pmask >> i4 >> 1;
3787 } else {
3788 mask |= ~(pmask >> i4 >> 1);
3789 }
3790 mask &= pmask;
3791
3792 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3793 insns, we need to keep the other half of the register. */
3794 imask = ~mask | ~pmask;
3795 if (do_zero) {
3796 imask = ~pmask;
3797 }
3798
3799 len = i4 - i3 + 1;
3800 pos = 63 - i4;
3801 rot = i5 & 63;
3802 if (s->fields->op2 == 0x5d) {
3803 pos += 32;
3804 }
3805
3806 /* In some cases we can implement this with extract. */
3807 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3808 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3809 return DISAS_NEXT;
3810 }
3811
3812 /* In some cases we can implement this with deposit. */
3813 if (len > 0 && (imask == 0 || ~mask == imask)) {
3814 /* Note that we rotate the bits to be inserted to the lsb, not to
3815 the position as described in the PoO. */
3816 rot = (rot - pos) & 63;
3817 } else {
3818 pos = -1;
3819 }
3820
3821 /* Rotate the input as necessary. */
3822 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3823
3824 /* Insert the selected bits into the output. */
3825 if (pos >= 0) {
3826 if (imask == 0) {
3827 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3828 } else {
3829 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3830 }
3831 } else if (imask == 0) {
3832 tcg_gen_andi_i64(o->out, o->in2, mask);
3833 } else {
3834 tcg_gen_andi_i64(o->in2, o->in2, mask);
3835 tcg_gen_andi_i64(o->out, o->out, imask);
3836 tcg_gen_or_i64(o->out, o->out, o->in2);
3837 }
3838 return DISAS_NEXT;
3839 }
3840
3841 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3842 {
3843 int i3 = get_field(s->fields, i3);
3844 int i4 = get_field(s->fields, i4);
3845 int i5 = get_field(s->fields, i5);
3846 uint64_t mask;
3847
3848 /* If this is a test-only form, arrange to discard the result. */
3849 if (i3 & 0x80) {
3850 o->out = tcg_temp_new_i64();
3851 o->g_out = false;
3852 }
3853
3854 i3 &= 63;
3855 i4 &= 63;
3856 i5 &= 63;
3857
3858 /* MASK is the set of bits to be operated on from R2.
3859 Take care of I3/I4 wraparound. */
3860 mask = ~0ull >> i3;
3861 if (i3 <= i4) {
3862 mask ^= ~0ull >> i4 >> 1;
3863 } else {
3864 mask |= ~(~0ull >> i4 >> 1);
3865 }
3866
3867 /* Rotate the input as necessary. */
3868 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3869
3870 /* Operate. */
3871 switch (s->fields->op2) {
3872 case 0x55: /* AND */
3873 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3874 tcg_gen_and_i64(o->out, o->out, o->in2);
3875 break;
3876 case 0x56: /* OR */
3877 tcg_gen_andi_i64(o->in2, o->in2, mask);
3878 tcg_gen_or_i64(o->out, o->out, o->in2);
3879 break;
3880 case 0x57: /* XOR */
3881 tcg_gen_andi_i64(o->in2, o->in2, mask);
3882 tcg_gen_xor_i64(o->out, o->out, o->in2);
3883 break;
3884 default:
3885 g_assert_not_reached();
3886 }
3887
3888 /* Set the CC. */
3889 tcg_gen_andi_i64(cc_dst, o->out, mask);
3890 set_cc_nz_u64(s, cc_dst);
3891 return DISAS_NEXT;
3892 }
3893
3894 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3895 {
3896 tcg_gen_bswap16_i64(o->out, o->in2);
3897 return DISAS_NEXT;
3898 }
3899
3900 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3901 {
3902 tcg_gen_bswap32_i64(o->out, o->in2);
3903 return DISAS_NEXT;
3904 }
3905
3906 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3907 {
3908 tcg_gen_bswap64_i64(o->out, o->in2);
3909 return DISAS_NEXT;
3910 }
3911
3912 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3913 {
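/* Perform the rotate at 32 bits, then zero-extend the result into
   the 64-bit output. */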
3914 TCGv_i32 t1 = tcg_temp_new_i32();
3915 TCGv_i32 t2 = tcg_temp_new_i32();
3916 TCGv_i32 to = tcg_temp_new_i32();
3917 tcg_gen_extrl_i64_i32(t1, o->in1);
3918 tcg_gen_extrl_i64_i32(t2, o->in2);
3919 tcg_gen_rotl_i32(to, t1, t2);
3920 tcg_gen_extu_i32_i64(o->out, to);
3921 tcg_temp_free_i32(t1);
3922 tcg_temp_free_i32(t2);
3923 tcg_temp_free_i32(to);
3924 return DISAS_NEXT;
3925 }
3926
3927 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3928 {
3929 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3930 return DISAS_NEXT;
3931 }
3932
3933 #ifndef CONFIG_USER_ONLY
3934 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3935 {
3936 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3937 set_cc_static(s);
3938 return DISAS_NEXT;
3939 }
3940
3941 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3942 {
3943 gen_helper_sacf(cpu_env, o->in2);
3944 /* Addressing mode has changed, so end the block. */
3945 return DISAS_PC_STALE;
3946 }
3947 #endif
3948
3949 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3950 {
3951 int sam = s->insn->data;
3952 TCGv_i64 tsam;
3953 uint64_t mask;
3954
3955 switch (sam) {
3956 case 0:
3957 mask = 0xffffff;
3958 break;
3959 case 1:
3960 mask = 0x7fffffff;
3961 break;
3962 default:
3963 mask = -1;
3964 break;
3965 }
3966
3967 /* Bizarre but true, we check the address of the current insn for the
3968 specification exception, not the next to be executed. Thus the PoO
3969 documents that Bad Things Happen two bytes before the end. */
3970 if (s->base.pc_next & ~mask) {
3971 gen_program_exception(s, PGM_SPECIFICATION);
3972 return DISAS_NORETURN;
3973 }
3974 s->pc_tmp &= mask;
3975
3976 tsam = tcg_const_i64(sam);
3977 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3978 tcg_temp_free_i64(tsam);
3979
3980 /* Always exit the TB, since we (may have) changed execution mode. */
3981 return DISAS_PC_STALE;
3982 }
3983
3984 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3985 {
3986 int r1 = get_field(s->fields, r1);
3987 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3988 return DISAS_NEXT;
3989 }
3990
3991 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3992 {
3993 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3994 return DISAS_NEXT;
3995 }
3996
3997 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3998 {
3999 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4000 return DISAS_NEXT;
4001 }
4002
4003 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4004 {
4005 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4006 return_low128(o->out2);
4007 return DISAS_NEXT;
4008 }
4009
4010 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4011 {
4012 gen_helper_sqeb(o->out, cpu_env, o->in2);
4013 return DISAS_NEXT;
4014 }
4015
4016 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4017 {
4018 gen_helper_sqdb(o->out, cpu_env, o->in2);
4019 return DISAS_NEXT;
4020 }
4021
4022 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4023 {
4024 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4025 return_low128(o->out2);
4026 return DISAS_NEXT;
4027 }
4028
4029 #ifndef CONFIG_USER_ONLY
4030 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4031 {
4032 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4033 set_cc_static(s);
4034 return DISAS_NEXT;
4035 }
4036
4037 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4038 {
4039 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4040 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4041 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4042 set_cc_static(s);
4043 tcg_temp_free_i32(r1);
4044 tcg_temp_free_i32(r3);
4045 return DISAS_NEXT;
4046 }
4047 #endif
4048
4049 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4050 {
4051 DisasCompare c;
4052 TCGv_i64 a, h;
4053 TCGLabel *lab;
4054 int r1;
4055
4056 disas_jcc(s, &c, get_field(s->fields, m3));
4057
4058 /* We want to store when the condition is fulfilled, so branch
4059 out when it's not. */
4060 c.cond = tcg_invert_cond(c.cond);
4061
4062 lab = gen_new_label();
4063 if (c.is_64) {
4064 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4065 } else {
4066 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4067 }
4068 free_compare(&c);
4069
4070 r1 = get_field(s->fields, r1);
4071 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
4072 switch (s->insn->data) {
4073 case 1: /* STOCG */
4074 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4075 break;
4076 case 0: /* STOC */
4077 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4078 break;
4079 case 2: /* STOCFH */
4080 h = tcg_temp_new_i64();
4081 tcg_gen_shri_i64(h, regs[r1], 32);
4082 tcg_gen_qemu_st32(h, a, get_mem_index(s));
4083 tcg_temp_free_i64(h);
4084 break;
4085 default:
4086 g_assert_not_reached();
4087 }
4088 tcg_temp_free_i64(a);
4089
4090 gen_set_label(lab);
4091 return DISAS_NEXT;
4092 }
4093
4094 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4095 {
4096 uint64_t sign = 1ull << s->insn->data;
4097 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4098 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4099 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4100 /* The arithmetic left shift is curious in that it does not affect
4101 the sign bit. Copy that over from the source unchanged. */
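    /* E.g. for the 32-bit form (sign = 1 << 31), shifting 0xc0000000 left
       by one yields 0x80000000 in the low word: bit 30 is shifted out of
       the magnitude while the sign bit itself is untouched. */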
4102 tcg_gen_andi_i64(o->out, o->out, ~sign);
4103 tcg_gen_andi_i64(o->in1, o->in1, sign);
4104 tcg_gen_or_i64(o->out, o->out, o->in1);
4105 return DISAS_NEXT;
4106 }
4107
4108 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4109 {
4110 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4111 return DISAS_NEXT;
4112 }
4113
4114 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4115 {
4116 tcg_gen_sar_i64(o->out, o->in1, o->in2);
4117 return DISAS_NEXT;
4118 }
4119
4120 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4121 {
4122 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4123 return DISAS_NEXT;
4124 }
4125
4126 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4127 {
4128 gen_helper_sfpc(cpu_env, o->in2);
4129 return DISAS_NEXT;
4130 }
4131
4132 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4133 {
4134 gen_helper_sfas(cpu_env, o->in2);
4135 return DISAS_NEXT;
4136 }
4137
4138 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4139 {
4140 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4141 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4142 gen_helper_srnm(cpu_env, o->addr1);
4143 return DISAS_NEXT;
4144 }
4145
4146 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4147 {
4148 /* Bits 0-55 are ignored. */
4149 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4150 gen_helper_srnm(cpu_env, o->addr1);
4151 return DISAS_NEXT;
4152 }
4153
4154 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4155 {
4156 TCGv_i64 tmp = tcg_temp_new_i64();
4157
4158 /* Bits other than 61-63 are ignored. */
4159 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4160
4161 /* No need to call a helper, we don't implement dfp */
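    /* The DFP rounding mode is FPC bits 25-27 (MSB-first bit numbering);
       counted from the LSB of the 32-bit word that is a 3-bit field at
       offset 4, which is what the deposit below updates. */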
4162 tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4163 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4164 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4165
4166 tcg_temp_free_i64(tmp);
4167 return DISAS_NEXT;
4168 }
4169
4170 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4171 {
4172 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4173 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4174 set_cc_static(s);
4175
4176 tcg_gen_shri_i64(o->in1, o->in1, 24);
4177 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4178 return DISAS_NEXT;
4179 }
4180
4181 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4182 {
4183 int b1 = get_field(s->fields, b1);
4184 int d1 = get_field(s->fields, d1);
4185 int b2 = get_field(s->fields, b2);
4186 int d2 = get_field(s->fields, d2);
4187 int r3 = get_field(s->fields, r3);
4188 TCGv_i64 tmp = tcg_temp_new_i64();
4189
4190 /* fetch all operands first */
4191 o->in1 = tcg_temp_new_i64();
4192 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4193 o->in2 = tcg_temp_new_i64();
4194 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4195 o->addr1 = get_address(s, 0, r3, 0);
4196
4197 /* load the third operand into r3 before modifying anything */
4198 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4199
4200 /* subtract CPU timer from first operand and store in GR0 */
4201 gen_helper_stpt(tmp, cpu_env);
4202 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4203
4204 /* store second operand in GR1 */
4205 tcg_gen_mov_i64(regs[1], o->in2);
4206
4207 tcg_temp_free_i64(tmp);
4208 return DISAS_NEXT;
4209 }
4210
4211 #ifndef CONFIG_USER_ONLY
4212 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4213 {
4214 tcg_gen_shri_i64(o->in2, o->in2, 4);
4215 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4216 return DISAS_NEXT;
4217 }
4218
4219 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4220 {
4221 gen_helper_sske(cpu_env, o->in1, o->in2);
4222 return DISAS_NEXT;
4223 }
4224
4225 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4226 {
4227 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4228 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4229 return DISAS_PC_STALE_NOCHAIN;
4230 }
4231
4232 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4233 {
4234 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4235 return DISAS_NEXT;
4236 }
4237 #endif
4238
4239 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4240 {
4241 gen_helper_stck(o->out, cpu_env);
4242 /* ??? We don't implement clock states. */
4243 gen_op_movi_cc(s, 0);
4244 return DISAS_NEXT;
4245 }
4246
4247 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4248 {
4249 TCGv_i64 c1 = tcg_temp_new_i64();
4250 TCGv_i64 c2 = tcg_temp_new_i64();
4251 TCGv_i64 todpr = tcg_temp_new_i64();
4252 gen_helper_stck(c1, cpu_env);
4253 /* A 16-bit value stored in a uint32_t (only valid bits set). */
4254 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4255 /* Shift the 64-bit value into its place as a zero-extended
4256 104-bit value. Note that "bit positions 64-103 are always
4257 non-zero so that they compare differently to STCK"; we set
4258 the least significant bit to 1. */
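    /* The resulting 16-byte big-endian layout, concretely:
         byte  0     : zero
         bytes 1-8   : the 64-bit TOD clock (clock's low byte comes via c2)
         bytes 9-12  : zero
         byte  13    : 0x01, the forced non-zero bit from the 0x10000
         bytes 14-15 : TOD programmable register (todpr) */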
4259 tcg_gen_shli_i64(c2, c1, 56);
4260 tcg_gen_shri_i64(c1, c1, 8);
4261 tcg_gen_ori_i64(c2, c2, 0x10000);
4262 tcg_gen_or_i64(c2, c2, todpr);
4263 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4264 tcg_gen_addi_i64(o->in2, o->in2, 8);
4265 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4266 tcg_temp_free_i64(c1);
4267 tcg_temp_free_i64(c2);
4268 tcg_temp_free_i64(todpr);
4269 /* ??? We don't implement clock states. */
4270 gen_op_movi_cc(s, 0);
4271 return DISAS_NEXT;
4272 }
4273
4274 #ifndef CONFIG_USER_ONLY
4275 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4276 {
4277 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4278 gen_helper_sck(cc_op, cpu_env, o->in1);
4279 set_cc_static(s);
4280 return DISAS_NEXT;
4281 }
4282
4283 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4284 {
4285 gen_helper_sckc(cpu_env, o->in2);
4286 return DISAS_NEXT;
4287 }
4288
4289 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4290 {
4291 gen_helper_sckpf(cpu_env, regs[0]);
4292 return DISAS_NEXT;
4293 }
4294
4295 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4296 {
4297 gen_helper_stckc(o->out, cpu_env);
4298 return DISAS_NEXT;
4299 }
4300
4301 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4302 {
4303 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4304 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4305 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4306 tcg_temp_free_i32(r1);
4307 tcg_temp_free_i32(r3);
4308 return DISAS_NEXT;
4309 }
4310
4311 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4312 {
4313 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4314 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4315 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4316 tcg_temp_free_i32(r1);
4317 tcg_temp_free_i32(r3);
4318 return DISAS_NEXT;
4319 }
4320
4321 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4322 {
4323 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4324 return DISAS_NEXT;
4325 }
4326
4327 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4328 {
4329 gen_helper_spt(cpu_env, o->in2);
4330 return DISAS_NEXT;
4331 }
4332
4333 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4334 {
4335 gen_helper_stfl(cpu_env);
4336 return DISAS_NEXT;
4337 }
4338
4339 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4340 {
4341 gen_helper_stpt(o->out, cpu_env);
4342 return DISAS_NEXT;
4343 }
4344
4345 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4346 {
4347 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4348 set_cc_static(s);
4349 return DISAS_NEXT;
4350 }
4351
4352 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4353 {
4354 gen_helper_spx(cpu_env, o->in2);
4355 return DISAS_NEXT;
4356 }
4357
4358 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4359 {
4360 gen_helper_xsch(cpu_env, regs[1]);
4361 set_cc_static(s);
4362 return DISAS_NEXT;
4363 }
4364
4365 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4366 {
4367 gen_helper_csch(cpu_env, regs[1]);
4368 set_cc_static(s);
4369 return DISAS_NEXT;
4370 }
4371
4372 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4373 {
4374 gen_helper_hsch(cpu_env, regs[1]);
4375 set_cc_static(s);
4376 return DISAS_NEXT;
4377 }
4378
4379 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4380 {
4381 gen_helper_msch(cpu_env, regs[1], o->in2);
4382 set_cc_static(s);
4383 return DISAS_NEXT;
4384 }
4385
4386 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4387 {
4388 gen_helper_rchp(cpu_env, regs[1]);
4389 set_cc_static(s);
4390 return DISAS_NEXT;
4391 }
4392
4393 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4394 {
4395 gen_helper_rsch(cpu_env, regs[1]);
4396 set_cc_static(s);
4397 return DISAS_NEXT;
4398 }
4399
4400 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4401 {
4402 gen_helper_sal(cpu_env, regs[1]);
4403 return DISAS_NEXT;
4404 }
4405
4406 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4407 {
4408 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4409 return DISAS_NEXT;
4410 }
4411
4412 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4413 {
4414 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4415 gen_op_movi_cc(s, 3);
4416 return DISAS_NEXT;
4417 }
4418
4419 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4420 {
4421 /* The instruction is suppressed if not provided. */
4422 return DISAS_NEXT;
4423 }
4424
4425 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4426 {
4427 gen_helper_ssch(cpu_env, regs[1], o->in2);
4428 set_cc_static(s);
4429 return DISAS_NEXT;
4430 }
4431
4432 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4433 {
4434 gen_helper_stsch(cpu_env, regs[1], o->in2);
4435 set_cc_static(s);
4436 return DISAS_NEXT;
4437 }
4438
4439 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4440 {
4441 gen_helper_stcrw(cpu_env, o->in2);
4442 set_cc_static(s);
4443 return DISAS_NEXT;
4444 }
4445
4446 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4447 {
4448 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4449 set_cc_static(s);
4450 return DISAS_NEXT;
4451 }
4452
4453 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4454 {
4455 gen_helper_tsch(cpu_env, regs[1], o->in2);
4456 set_cc_static(s);
4457 return DISAS_NEXT;
4458 }
4459
4460 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4461 {
4462 gen_helper_chsc(cpu_env, o->in2);
4463 set_cc_static(s);
4464 return DISAS_NEXT;
4465 }
4466
4467 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4468 {
4469 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4470 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4471 return DISAS_NEXT;
4472 }
4473
4474 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4475 {
4476 uint64_t i2 = get_field(s->fields, i2);
4477 TCGv_i64 t;
4478
4479 /* It is important to do what the instruction name says: STORE THEN.
4480 If we let the output hook perform the store, a fault and restart
4481 would leave the wrong SYSTEM MASK in place. */
4482 t = tcg_temp_new_i64();
4483 tcg_gen_shri_i64(t, psw_mask, 56);
4484 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4485 tcg_temp_free_i64(t);
4486
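    /* Opcode 0xac is STNSM, which ANDs the immediate into the system mask;
       0xad is STOSM, which ORs it in. */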
4487 if (s->fields->op == 0xac) {
4488 tcg_gen_andi_i64(psw_mask, psw_mask,
4489 (i2 << 56) | 0x00ffffffffffffffull);
4490 } else {
4491 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4492 }
4493
4494 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4495 return DISAS_PC_STALE_NOCHAIN;
4496 }
4497
4498 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4499 {
4500 gen_helper_stura(cpu_env, o->in2, o->in1);
4501 return DISAS_NEXT;
4502 }
4503
4504 static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
4505 {
4506 gen_helper_sturg(cpu_env, o->in2, o->in1);
4507 return DISAS_NEXT;
4508 }
4509 #endif
4510
4511 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4512 {
4513 gen_helper_stfle(cc_op, cpu_env, o->in2);
4514 set_cc_static(s);
4515 return DISAS_NEXT;
4516 }
4517
4518 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4519 {
4520 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4521 return DISAS_NEXT;
4522 }
4523
4524 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4525 {
4526 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4527 return DISAS_NEXT;
4528 }
4529
4530 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4531 {
4532 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4533 return DISAS_NEXT;
4534 }
4535
4536 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4537 {
4538 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4539 return DISAS_NEXT;
4540 }
4541
4542 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4543 {
4544 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4545 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4546 gen_helper_stam(cpu_env, r1, o->in2, r3);
4547 tcg_temp_free_i32(r1);
4548 tcg_temp_free_i32(r3);
4549 return DISAS_NEXT;
4550 }
4551
4552 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4553 {
4554 int m3 = get_field(s->fields, m3);
4555 int pos, base = s->insn->data;
4556 TCGv_i64 tmp = tcg_temp_new_i64();
4557
4558 pos = base + ctz32(m3) * 8;
4559 switch (m3) {
4560 case 0xf:
4561 /* Effectively a 32-bit store. */
4562 tcg_gen_shri_i64(tmp, o->in1, pos);
4563 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4564 break;
4565
4566 case 0xc:
4567 case 0x6:
4568 case 0x3:
4569 /* Effectively a 16-bit store. */
4570 tcg_gen_shri_i64(tmp, o->in1, pos);
4571 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4572 break;
4573
4574 case 0x8:
4575 case 0x4:
4576 case 0x2:
4577 case 0x1:
4578 /* Effectively an 8-bit store. */
4579 tcg_gen_shri_i64(tmp, o->in1, pos);
4580 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4581 break;
4582
4583 default:
4584 /* This is going to be a sequence of shifts and stores. */
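        /* E.g. m3 = 0x9 selects the first and last bytes of the word: the
           loop stores the byte at pos = base + 24, bumps the address, skips
           the two clear mask bits, then stores the byte at pos = base, so
           the selected bytes pack contiguously in memory. */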
4585 pos = base + 32 - 8;
4586 while (m3) {
4587 if (m3 & 0x8) {
4588 tcg_gen_shri_i64(tmp, o->in1, pos);
4589 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4590 tcg_gen_addi_i64(o->in2, o->in2, 1);
4591 }
4592 m3 = (m3 << 1) & 0xf;
4593 pos -= 8;
4594 }
4595 break;
4596 }
4597 tcg_temp_free_i64(tmp);
4598 return DISAS_NEXT;
4599 }
4600
4601 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4602 {
4603 int r1 = get_field(s->fields, r1);
4604 int r3 = get_field(s->fields, r3);
4605 int size = s->insn->data;
4606 TCGv_i64 tsize = tcg_const_i64(size);
4607
4608 while (1) {
4609 if (size == 8) {
4610 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4611 } else {
4612 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4613 }
4614 if (r1 == r3) {
4615 break;
4616 }
4617 tcg_gen_add_i64(o->in2, o->in2, tsize);
4618 r1 = (r1 + 1) & 15;
4619 }
4620
4621 tcg_temp_free_i64(tsize);
4622 return DISAS_NEXT;
4623 }
4624
4625 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4626 {
4627 int r1 = get_field(s->fields, r1);
4628 int r3 = get_field(s->fields, r3);
4629 TCGv_i64 t = tcg_temp_new_i64();
4630 TCGv_i64 t4 = tcg_const_i64(4);
4631 TCGv_i64 t32 = tcg_const_i64(32);
4632
4633 while (1) {
4634 tcg_gen_shr_i64(t, regs[r1], t32); /* high word down into bits 0-31 */
4635 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4636 if (r1 == r3) {
4637 break;
4638 }
4639 tcg_gen_add_i64(o->in2, o->in2, t4);
4640 r1 = (r1 + 1) & 15;
4641 }
4642
4643 tcg_temp_free_i64(t);
4644 tcg_temp_free_i64(t4);
4645 tcg_temp_free_i64(t32);
4646 return DISAS_NEXT;
4647 }
4648
4649 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4650 {
4651 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4652 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4653 } else if (HAVE_ATOMIC128) {
4654 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4655 } else {
4656 gen_helper_exit_atomic(cpu_env);
4657 return DISAS_NORETURN;
4658 }
4659 return DISAS_NEXT;
4660 }
4661
4662 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4663 {
4664 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4665 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4666
4667 gen_helper_srst(cpu_env, r1, r2);
4668
4669 tcg_temp_free_i32(r1);
4670 tcg_temp_free_i32(r2);
4671 set_cc_static(s);
4672 return DISAS_NEXT;
4673 }
4674
4675 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4676 {
4677 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4678 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4679
4680 gen_helper_srstu(cpu_env, r1, r2);
4681
4682 tcg_temp_free_i32(r1);
4683 tcg_temp_free_i32(r2);
4684 set_cc_static(s);
4685 return DISAS_NEXT;
4686 }
4687
4688 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4689 {
4690 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4691 return DISAS_NEXT;
4692 }
4693
4694 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4695 {
4696 DisasCompare cmp;
4697 TCGv_i64 borrow;
4698
4699 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4700
4701 /* The !borrow flag is the msb of CC. Since we want the inverse of
4702 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
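    /* I.e. borrow = (CC is 0 or 1); the setcond below materializes that
       predicate as a 0/1 value, which is then subtracted from the result. */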
4703 disas_jcc(s, &cmp, 8 | 4);
4704 borrow = tcg_temp_new_i64();
4705 if (cmp.is_64) {
4706 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4707 } else {
4708 TCGv_i32 t = tcg_temp_new_i32();
4709 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4710 tcg_gen_extu_i32_i64(borrow, t);
4711 tcg_temp_free_i32(t);
4712 }
4713 free_compare(&cmp);
4714
4715 tcg_gen_sub_i64(o->out, o->out, borrow);
4716 tcg_temp_free_i64(borrow);
4717 return DISAS_NEXT;
4718 }
4719
4720 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4721 {
4722 TCGv_i32 t;
4723
4724 update_psw_addr(s);
4725 update_cc_op(s);
4726
4727 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4728 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4729 tcg_temp_free_i32(t);
4730
4731 t = tcg_const_i32(s->ilen);
4732 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4733 tcg_temp_free_i32(t);
4734
4735 gen_exception(EXCP_SVC);
4736 return DISAS_NORETURN;
4737 }
4738
4739 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4740 {
4741 int cc = 0;
4742
4743 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4744 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4745 gen_op_movi_cc(s, cc);
4746 return DISAS_NEXT;
4747 }
4748
4749 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4750 {
4751 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4752 set_cc_static(s);
4753 return DISAS_NEXT;
4754 }
4755
4756 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4757 {
4758 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4759 set_cc_static(s);
4760 return DISAS_NEXT;
4761 }
4762
4763 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4764 {
4765 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4766 set_cc_static(s);
4767 return DISAS_NEXT;
4768 }
4769
4770 #ifndef CONFIG_USER_ONLY
4771
4772 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4773 {
4774 gen_helper_testblock(cc_op, cpu_env, o->in2);
4775 set_cc_static(s);
4776 return DISAS_NEXT;
4777 }
4778
4779 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4780 {
4781 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4782 set_cc_static(s);
4783 return DISAS_NEXT;
4784 }
4785
4786 #endif
4787
4788 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4789 {
4790 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4791 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4792 tcg_temp_free_i32(l1);
4793 set_cc_static(s);
4794 return DISAS_NEXT;
4795 }
4796
4797 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4798 {
4799 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4800 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4801 tcg_temp_free_i32(l);
4802 set_cc_static(s);
4803 return DISAS_NEXT;
4804 }
4805
4806 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4807 {
4808 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4809 return_low128(o->out2);
4810 set_cc_static(s);
4811 return DISAS_NEXT;
4812 }
4813
4814 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4815 {
4816 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4817 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4818 tcg_temp_free_i32(l);
4819 set_cc_static(s);
4820 return DISAS_NEXT;
4821 }
4822
4823 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4824 {
4825 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4826 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4827 tcg_temp_free_i32(l);
4828 set_cc_static(s);
4829 return DISAS_NEXT;
4830 }
4831
4832 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4833 {
4834 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4835 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4836 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4837 TCGv_i32 tst = tcg_temp_new_i32();
4838 int m3 = get_field(s->fields, m3);
4839
4840 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4841 m3 = 0;
4842 }
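    /* With m3 bit 0 set (honoured only with the ETF2-enhancement facility),
       tst is forced to -1, a value no zero-extended character can match,
       effectively disabling the test-character check; otherwise the test
       character comes from r0, 8 or 16 bits wide according to the opcode. */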
4843 if (m3 & 1) {
4844 tcg_gen_movi_i32(tst, -1);
4845 } else {
4846 tcg_gen_extrl_i64_i32(tst, regs[0]);
4847 if (s->insn->opc & 3) {
4848 tcg_gen_ext8u_i32(tst, tst);
4849 } else {
4850 tcg_gen_ext16u_i32(tst, tst);
4851 }
4852 }
4853 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4854
4855 tcg_temp_free_i32(r1);
4856 tcg_temp_free_i32(r2);
4857 tcg_temp_free_i32(sizes);
4858 tcg_temp_free_i32(tst);
4859 set_cc_static(s);
4860 return DISAS_NEXT;
4861 }
4862
4863 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4864 {
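    /* TEST AND SET: atomically replace the byte with all ones; the CC is
       the leftmost bit of the byte as it was before the exchange. */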
4865 TCGv_i32 t1 = tcg_const_i32(0xff);
4866 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4867 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4868 tcg_temp_free_i32(t1);
4869 set_cc_static(s);
4870 return DISAS_NEXT;
4871 }
4872
4873 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4874 {
4875 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4876 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4877 tcg_temp_free_i32(l);
4878 return DISAS_NEXT;
4879 }
4880
4881 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4882 {
4883 int l1 = get_field(s->fields, l1) + 1;
4884 TCGv_i32 l;
4885
4886 /* The length must not exceed 32 bytes. */
4887 if (l1 > 32) {
4888 gen_program_exception(s, PGM_SPECIFICATION);
4889 return DISAS_NORETURN;
4890 }
4891 l = tcg_const_i32(l1);
4892 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4893 tcg_temp_free_i32(l);
4894 set_cc_static(s);
4895 return DISAS_NEXT;
4896 }
4897
4898 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4899 {
4900 int l1 = get_field(s->fields, l1) + 1;
4901 TCGv_i32 l;
4902
4903 /* The length must be even and must not exceed 64 bytes. */
4904 if ((l1 & 1) || (l1 > 64)) {
4905 gen_program_exception(s, PGM_SPECIFICATION);
4906 return DISAS_NORETURN;
4907 }
4908 l = tcg_const_i32(l1);
4909 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4910 tcg_temp_free_i32(l);
4911 set_cc_static(s);
4912 return DISAS_NEXT;
4913 }
4914
4915
4916 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4917 {
4918 int d1 = get_field(s->fields, d1);
4919 int d2 = get_field(s->fields, d2);
4920 int b1 = get_field(s->fields, b1);
4921 int b2 = get_field(s->fields, b2);
4922 int l = get_field(s->fields, l1);
4923 TCGv_i32 t32;
4924
4925 o->addr1 = get_address(s, 0, b1, d1);
4926
4927 /* If the addresses are identical, this is a store/memset of zero. */
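    /* E.g. l1 = 10 (operand length 11) unrolls into one 8-byte, one 2-byte
       and one 1-byte store of zero, with the CC fixed at 0. */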
4928 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4929 o->in2 = tcg_const_i64(0);
4930
4931 l++;
4932 while (l >= 8) {
4933 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4934 l -= 8;
4935 if (l > 0) {
4936 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4937 }
4938 }
4939 if (l >= 4) {
4940 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4941 l -= 4;
4942 if (l > 0) {
4943 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4944 }
4945 }
4946 if (l >= 2) {
4947 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4948 l -= 2;
4949 if (l > 0) {
4950 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4951 }
4952 }
4953 if (l) {
4954 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4955 }
4956 gen_op_movi_cc(s, 0);
4957 return DISAS_NEXT;
4958 }
4959
4960 /* But in general we'll defer to a helper. */
4961 o->in2 = get_address(s, 0, b2, d2);
4962 t32 = tcg_const_i32(l);
4963 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4964 tcg_temp_free_i32(t32);
4965 set_cc_static(s);
4966 return DISAS_NEXT;
4967 }
4968
4969 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4970 {
4971 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4972 return DISAS_NEXT;
4973 }
4974
4975 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4976 {
4977 int shift = s->insn->data & 0xff;
4978 int size = s->insn->data >> 8;
4979 uint64_t mask = ((1ull << size) - 1) << shift;
4980
4981 assert(!o->g_in2);
4982 tcg_gen_shli_i64(o->in2, o->in2, shift);
4983 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4984
4985 /* Produce the CC from only the bits manipulated. */
4986 tcg_gen_andi_i64(cc_dst, o->out, mask);
4987 set_cc_nz_u64(s, cc_dst);
4988 return DISAS_NEXT;
4989 }
4990
4991 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4992 {
4993 o->in1 = tcg_temp_new_i64();
4994
4995 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4996 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4997 } else {
4998 /* Perform the atomic operation in memory. */
4999 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5000 s->insn->data);
5001 }
5002
5003 /* Recompute also for atomic case: needed for setting CC. */
5004 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5005
5006 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5007 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5008 }
5009 return DISAS_NEXT;
5010 }
5011
5012 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5013 {
5014 o->out = tcg_const_i64(0);
5015 return DISAS_NEXT;
5016 }
5017
5018 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5019 {
5020 o->out = tcg_const_i64(0);
5021 o->out2 = o->out;
5022 o->g_out2 = true;
5023 return DISAS_NEXT;
5024 }
5025
5026 #ifndef CONFIG_USER_ONLY
5027 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5028 {
5029 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5030
5031 gen_helper_clp(cpu_env, r2);
5032 tcg_temp_free_i32(r2);
5033 set_cc_static(s);
5034 return DISAS_NEXT;
5035 }
5036
5037 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5038 {
5039 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5040 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5041
5042 gen_helper_pcilg(cpu_env, r1, r2);
5043 tcg_temp_free_i32(r1);
5044 tcg_temp_free_i32(r2);
5045 set_cc_static(s);
5046 return DISAS_NEXT;
5047 }
5048
5049 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5050 {
5051 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5052 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5053
5054 gen_helper_pcistg(cpu_env, r1, r2);
5055 tcg_temp_free_i32(r1);
5056 tcg_temp_free_i32(r2);
5057 set_cc_static(s);
5058 return DISAS_NEXT;
5059 }
5060
5061 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5062 {
5063 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5064 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5065
5066 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5067 tcg_temp_free_i32(ar);
5068 tcg_temp_free_i32(r1);
5069 set_cc_static(s);
5070 return DISAS_NEXT;
5071 }
5072
5073 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5074 {
5075 gen_helper_sic(cpu_env, o->in1, o->in2);
5076 return DISAS_NEXT;
5077 }
5078
5079 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5080 {
5081 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5082 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5083
5084 gen_helper_rpcit(cpu_env, r1, r2);
5085 tcg_temp_free_i32(r1);
5086 tcg_temp_free_i32(r2);
5087 set_cc_static(s);
5088 return DISAS_NEXT;
5089 }
5090
5091 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5092 {
5093 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5094 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
5095 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5096
5097 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5098 tcg_temp_free_i32(ar);
5099 tcg_temp_free_i32(r1);
5100 tcg_temp_free_i32(r3);
5101 set_cc_static(s);
5102 return DISAS_NEXT;
5103 }
5104
5105 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5106 {
5107 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5108 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5109
5110 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5111 tcg_temp_free_i32(ar);
5112 tcg_temp_free_i32(r1);
5113 set_cc_static(s);
5114 return DISAS_NEXT;
5115 }
5116 #endif
5117
5118 #include "translate_vx.inc.c"
5119
5120 /* ====================================================================== */
5121 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5122 the original inputs), update the various cc data structures in order to
5123 be able to compute the new condition code. */
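
/* For example, a 32-bit ADD first runs op_add to produce the output, then
   cout_adds32 records CC_OP_ADD_32 together with in1/in2/out; the condition
   code itself is computed lazily, only when something consumes it. */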
5124
5125 static void cout_abs32(DisasContext *s, DisasOps *o)
5126 {
5127 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5128 }
5129
5130 static void cout_abs64(DisasContext *s, DisasOps *o)
5131 {
5132 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5133 }
5134
5135 static void cout_adds32(DisasContext *s, DisasOps *o)
5136 {
5137 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5138 }
5139
5140 static void cout_adds64(DisasContext *s, DisasOps *o)
5141 {
5142 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5143 }
5144
5145 static void cout_addu32(DisasContext *s, DisasOps *o)
5146 {
5147 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
5148 }
5149
5150 static void cout_addu64(DisasContext *s, DisasOps *o)
5151 {
5152 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
5153 }
5154
5155 static void cout_addc32(DisasContext *s, DisasOps *o)
5156 {
5157 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
5158 }
5159
5160 static void cout_addc64(DisasContext *s, DisasOps *o)
5161 {
5162 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
5163 }
5164
5165 static void cout_cmps32(DisasContext *s, DisasOps *o)
5166 {
5167 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5168 }
5169
5170 static void cout_cmps64(DisasContext *s, DisasOps *o)
5171 {
5172 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5173 }
5174
5175 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5176 {
5177 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5178 }
5179
5180 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5181 {
5182 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5183 }
5184
5185 static void cout_f32(DisasContext *s, DisasOps *o)
5186 {
5187 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5188 }
5189
5190 static void cout_f64(DisasContext *s, DisasOps *o)
5191 {
5192 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5193 }
5194
5195 static void cout_f128(DisasContext *s, DisasOps *o)
5196 {
5197 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5198 }
5199
5200 static void cout_nabs32(DisasContext *s, DisasOps *o)
5201 {
5202 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5203 }
5204
5205 static void cout_nabs64(DisasContext *s, DisasOps *o)
5206 {
5207 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5208 }
5209
5210 static void cout_neg32(DisasContext *s, DisasOps *o)
5211 {
5212 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5213 }
5214
5215 static void cout_neg64(DisasContext *s, DisasOps *o)
5216 {
5217 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5218 }
5219
5220 static void cout_nz32(DisasContext *s, DisasOps *o)
5221 {
5222 tcg_gen_ext32u_i64(cc_dst, o->out);
5223 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5224 }
5225
5226 static void cout_nz64(DisasContext *s, DisasOps *o)
5227 {
5228 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5229 }
5230
5231 static void cout_s32(DisasContext *s, DisasOps *o)
5232 {
5233 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5234 }
5235
5236 static void cout_s64(DisasContext *s, DisasOps *o)
5237 {
5238 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5239 }
5240
5241 static void cout_subs32(DisasContext *s, DisasOps *o)
5242 {
5243 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5244 }
5245
5246 static void cout_subs64(DisasContext *s, DisasOps *o)
5247 {
5248 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5249 }
5250
5251 static void cout_subu32(DisasContext *s, DisasOps *o)
5252 {
5253 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5254 }
5255
5256 static void cout_subu64(DisasContext *s, DisasOps *o)
5257 {
5258 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5259 }
5260
5261 static void cout_subb32(DisasContext *s, DisasOps *o)
5262 {
5263 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5264 }
5265
5266 static void cout_subb64(DisasContext *s, DisasOps *o)
5267 {
5268 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5269 }
5270
5271 static void cout_tm32(DisasContext *s, DisasOps *o)
5272 {
5273 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5274 }
5275
5276 static void cout_tm64(DisasContext *s, DisasOps *o)
5277 {
5278 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5279 }
5280
5281 /* ====================================================================== */
5282 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5283 with the TCG register to which we will write. Used in combination with
5284 the "wout" generators, in some cases we need a new temporary, and in
5285 some cases we can write to a TCG global. */
5286
5287 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
5288 {
5289 o->out = tcg_temp_new_i64();
5290 }
5291 #define SPEC_prep_new 0
5292
5293 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
5294 {
5295 o->out = tcg_temp_new_i64();
5296 o->out2 = tcg_temp_new_i64();
5297 }
5298 #define SPEC_prep_new_P 0
5299
5300 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5301 {
5302 o->out = regs[get_field(f, r1)];
5303 o->g_out = true;
5304 }
5305 #define SPEC_prep_r1 0
5306
5307 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
5308 {
5309 int r1 = get_field(f, r1);
5310 o->out = regs[r1];
5311 o->out2 = regs[r1 + 1];
5312 o->g_out = o->g_out2 = true;
5313 }
5314 #define SPEC_prep_r1_P SPEC_r1_even
5315
5316 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5317 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5318 {
5319 o->out = load_freg(get_field(f, r1));
5320 o->out2 = load_freg(get_field(f, r1) + 2);
5321 }
5322 #define SPEC_prep_x1 SPEC_r1_f128
5323
5324 /* ====================================================================== */
5325 /* The "Write OUTput" generators. These generally perform some non-trivial
5326 copy of data to TCG globals, or to main memory. The trivial cases are
5327 generally handled by having a "prep" generator install the TCG global
5328 as the destination of the operation. */
5329
5330 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5331 {
5332 store_reg(get_field(f, r1), o->out);
5333 }
5334 #define SPEC_wout_r1 0
5335
5336 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5337 {
5338 int r1 = get_field(f, r1);
5339 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5340 }
5341 #define SPEC_wout_r1_8 0
5342
5343 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5344 {
5345 int r1 = get_field(f, r1);
5346 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5347 }
5348 #define SPEC_wout_r1_16 0
5349
5350 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5351 {
5352 store_reg32_i64(get_field(f, r1), o->out);
5353 }
5354 #define SPEC_wout_r1_32 0
5355
5356 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
5357 {
5358 store_reg32h_i64(get_field(f, r1), o->out);
5359 }
5360 #define SPEC_wout_r1_32h 0
5361
5362 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5363 {
5364 int r1 = get_field(f, r1);
5365 store_reg32_i64(r1, o->out);
5366 store_reg32_i64(r1 + 1, o->out2);
5367 }
5368 #define SPEC_wout_r1_P32 SPEC_r1_even
5369
5370 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5371 {
5372 int r1 = get_field(f, r1);
5373 store_reg32_i64(r1 + 1, o->out);
5374 tcg_gen_shri_i64(o->out, o->out, 32);
5375 store_reg32_i64(r1, o->out);
5376 }
5377 #define SPEC_wout_r1_D32 SPEC_r1_even
5378
5379 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5380 {
5381 int r3 = get_field(f, r3);
5382 store_reg32_i64(r3, o->out);
5383 store_reg32_i64(r3 + 1, o->out2);
5384 }
5385 #define SPEC_wout_r3_P32 SPEC_r3_even
5386
5387 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
5388 {
5389 int r3 = get_field(f, r3);
5390 store_reg(r3, o->out);
5391 store_reg(r3 + 1, o->out2);
5392 }
5393 #define SPEC_wout_r3_P64 SPEC_r3_even
5394
5395 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5396 {
5397 store_freg32_i64(get_field(f, r1), o->out);
5398 }
5399 #define SPEC_wout_e1 0
5400
5401 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5402 {
5403 store_freg(get_field(f, r1), o->out);
5404 }
5405 #define SPEC_wout_f1 0
5406
5407 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5408 {
5409 int f1 = get_field(f, r1);
5410 store_freg(f1, o->out);
5411 store_freg(f1 + 2, o->out2);
5412 }
5413 #define SPEC_wout_x1 SPEC_r1_f128
5414
5415 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5416 {
5417 if (get_field(f, r1) != get_field(f, r2)) {
5418 store_reg32_i64(get_field(f, r1), o->out);
5419 }
5420 }
5421 #define SPEC_wout_cond_r1r2_32 0
5422
5423 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
5424 {
5425 if (get_field(f, r1) != get_field(f, r2)) {
5426 store_freg32_i64(get_field(f, r1), o->out);
5427 }
5428 }
5429 #define SPEC_wout_cond_e1e2 0
5430
5431 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5432 {
5433 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5434 }
5435 #define SPEC_wout_m1_8 0
5436
5437 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5438 {
5439 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5440 }
5441 #define SPEC_wout_m1_16 0
5442
5443 #ifndef CONFIG_USER_ONLY
5444 static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
5445 {
5446 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5447 }
5448 #define SPEC_wout_m1_16a 0
5449 #endif
5450
5451 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5452 {
5453 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5454 }
5455 #define SPEC_wout_m1_32 0
5456
5457 #ifndef CONFIG_USER_ONLY
5458 static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
5459 {
5460 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5461 }
5462 #define SPEC_wout_m1_32a 0
5463 #endif
5464
5465 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5466 {
5467 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5468 }
5469 #define SPEC_wout_m1_64 0
5470
5471 #ifndef CONFIG_USER_ONLY
5472 static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5473 {
5474 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5475 }
5476 #define SPEC_wout_m1_64a 0
5477 #endif
5478
5479 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5480 {
5481 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5482 }
5483 #define SPEC_wout_m2_32 0
5484
5485 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5486 {
5487 store_reg(get_field(f, r1), o->in2);
5488 }
5489 #define SPEC_wout_in2_r1 0
5490
5491 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5492 {
5493 store_reg32_i64(get_field(f, r1), o->in2);
5494 }
5495 #define SPEC_wout_in2_r1_32 0
5496
5497 /* ====================================================================== */
5498 /* The "INput 1" generators. These load the first operand to an insn. */
5499
5500 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5501 {
5502 o->in1 = load_reg(get_field(f, r1));
5503 }
5504 #define SPEC_in1_r1 0
5505
5506 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5507 {
5508 o->in1 = regs[get_field(f, r1)];
5509 o->g_in1 = true;
5510 }
5511 #define SPEC_in1_r1_o 0
5512
5513 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5514 {
5515 o->in1 = tcg_temp_new_i64();
5516 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5517 }
5518 #define SPEC_in1_r1_32s 0
5519
5520 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5521 {
5522 o->in1 = tcg_temp_new_i64();
5523 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5524 }
5525 #define SPEC_in1_r1_32u 0
5526
5527 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5528 {
5529 o->in1 = tcg_temp_new_i64();
5530 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5531 }
5532 #define SPEC_in1_r1_sr32 0
5533
5534 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5535 {
5536 o->in1 = load_reg(get_field(f, r1) + 1);
5537 }
5538 #define SPEC_in1_r1p1 SPEC_r1_even
5539
5540 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5541 {
5542 o->in1 = tcg_temp_new_i64();
5543 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5544 }
5545 #define SPEC_in1_r1p1_32s SPEC_r1_even
5546
5547 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5548 {
5549 o->in1 = tcg_temp_new_i64();
5550 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5551 }
5552 #define SPEC_in1_r1p1_32u SPEC_r1_even
5553
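/* D32: the 64-bit operand lives in an even/odd register pair, with the
   even register holding the high word; hence the concat below of
   regs[r1 + 1] (low) with regs[r1] (high). */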
5554 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5555 {
5556 int r1 = get_field(f, r1);
5557 o->in1 = tcg_temp_new_i64();
5558 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5559 }
5560 #define SPEC_in1_r1_D32 SPEC_r1_even
5561
5562 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5563 {
5564 o->in1 = load_reg(get_field(f, r2));
5565 }
5566 #define SPEC_in1_r2 0
5567
5568 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5569 {
5570 o->in1 = tcg_temp_new_i64();
5571 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5572 }
5573 #define SPEC_in1_r2_sr32 0
5574
5575 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5576 {
5577 o->in1 = load_reg(get_field(f, r3));
5578 }
5579 #define SPEC_in1_r3 0
5580
5581 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5582 {
5583 o->in1 = regs[get_field(f, r3)];
5584 o->g_in1 = true;
5585 }
5586 #define SPEC_in1_r3_o 0
5587
5588 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5589 {
5590 o->in1 = tcg_temp_new_i64();
5591 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5592 }
5593 #define SPEC_in1_r3_32s 0
5594
5595 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5596 {
5597 o->in1 = tcg_temp_new_i64();
5598 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5599 }
5600 #define SPEC_in1_r3_32u 0
5601
5602 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5603 {
5604 int r3 = get_field(f, r3);
5605 o->in1 = tcg_temp_new_i64();
5606 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5607 }
5608 #define SPEC_in1_r3_D32 SPEC_r3_even
5609
5610 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5611 {
5612 o->in1 = load_freg32_i64(get_field(f, r1));
5613 }
5614 #define SPEC_in1_e1 0
5615
5616 static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5617 {
5618 o->in1 = load_freg(get_field(f, r1));
5619 }
5620 #define SPEC_in1_f1 0
5621
5622 /* Load the high double word of an extended (128-bit) format FP number */
5623 static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o)
5624 {
5625 o->in1 = load_freg(get_field(f, r2));
5626 }
5627 #define SPEC_in1_x2h SPEC_r2_f128
5628
5629 static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o)
5630 {
5631 o->in1 = load_freg(get_field(f, r3));
5632 }
5633 #define SPEC_in1_f3 0
5634
5635 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5636 {
5637 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5638 }
5639 #define SPEC_in1_la1 0
5640
5641 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5642 {
5643 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5644 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5645 }
5646 #define SPEC_in1_la2 0
5647
5648 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5649 {
5650 in1_la1(s, f, o);
5651 o->in1 = tcg_temp_new_i64();
5652 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5653 }
5654 #define SPEC_in1_m1_8u 0
5655
5656 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5657 {
5658 in1_la1(s, f, o);
5659 o->in1 = tcg_temp_new_i64();
5660 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5661 }
5662 #define SPEC_in1_m1_16s 0
5663
5664 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5665 {
5666 in1_la1(s, f, o);
5667 o->in1 = tcg_temp_new_i64();
5668 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5669 }
5670 #define SPEC_in1_m1_16u 0
5671
5672 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5673 {
5674 in1_la1(s, f, o);
5675 o->in1 = tcg_temp_new_i64();
5676 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5677 }
5678 #define SPEC_in1_m1_32s 0
5679
5680 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5681 {
5682 in1_la1(s, f, o);
5683 o->in1 = tcg_temp_new_i64();
5684 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5685 }
5686 #define SPEC_in1_m1_32u 0
5687
5688 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5689 {
5690 in1_la1(s, f, o);
5691 o->in1 = tcg_temp_new_i64();
5692 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5693 }
5694 #define SPEC_in1_m1_64 0
5695
5696 /* ====================================================================== */
5697 /* The "INput 2" generators. These load the second operand to an insn. */
5698
5699 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5700 {
5701 o->in2 = regs[get_field(f, r1)];
5702 o->g_in2 = true;
5703 }
5704 #define SPEC_in2_r1_o 0
5705
5706 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5707 {
5708 o->in2 = tcg_temp_new_i64();
5709 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5710 }
5711 #define SPEC_in2_r1_16u 0
5712
5713 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5714 {
5715 o->in2 = tcg_temp_new_i64();
5716 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5717 }
5718 #define SPEC_in2_r1_32u 0
5719
5720 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5721 {
5722 int r1 = get_field(f, r1);
5723 o->in2 = tcg_temp_new_i64();
5724 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5725 }
5726 #define SPEC_in2_r1_D32 SPEC_r1_even
5727
5728 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5729 {
5730 o->in2 = load_reg(get_field(f, r2));
5731 }
5732 #define SPEC_in2_r2 0
5733
5734 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5735 {
5736 o->in2 = regs[get_field(f, r2)];
5737 o->g_in2 = true;
5738 }
5739 #define SPEC_in2_r2_o 0
5740
5741 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5742 {
5743 int r2 = get_field(f, r2);
5744 if (r2 != 0) {
5745 o->in2 = load_reg(r2);
5746 }
5747 }
5748 #define SPEC_in2_r2_nz 0
5749
5750 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5751 {
5752 o->in2 = tcg_temp_new_i64();
5753 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5754 }
5755 #define SPEC_in2_r2_8s 0
5756
5757 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5758 {
5759 o->in2 = tcg_temp_new_i64();
5760 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5761 }
5762 #define SPEC_in2_r2_8u 0
5763
5764 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5765 {
5766 o->in2 = tcg_temp_new_i64();
5767 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5768 }
5769 #define SPEC_in2_r2_16s 0
5770
5771 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5772 {
5773 o->in2 = tcg_temp_new_i64();
5774 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5775 }
5776 #define SPEC_in2_r2_16u 0
5777
5778 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5779 {
5780 o->in2 = load_reg(get_field(f, r3));
5781 }
5782 #define SPEC_in2_r3 0
5783
5784 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5785 {
5786 o->in2 = tcg_temp_new_i64();
5787 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5788 }
5789 #define SPEC_in2_r3_sr32 0
5790
5791 static void in2_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5792 {
5793 o->in2 = tcg_temp_new_i64();
5794 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r3)]);
5795 }
5796 #define SPEC_in2_r3_32u 0
5797
5798 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5799 {
5800 o->in2 = tcg_temp_new_i64();
5801 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5802 }
5803 #define SPEC_in2_r2_32s 0
5804
5805 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5806 {
5807 o->in2 = tcg_temp_new_i64();
5808 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5809 }
5810 #define SPEC_in2_r2_32u 0
5811
5812 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5813 {
5814 o->in2 = tcg_temp_new_i64();
5815 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5816 }
5817 #define SPEC_in2_r2_sr32 0
5818
5819 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5820 {
5821 o->in2 = load_freg32_i64(get_field(f, r2));
5822 }
5823 #define SPEC_in2_e2 0
5824
5825 static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o)
5826 {
5827 o->in2 = load_freg(get_field(f, r2));
5828 }
5829 #define SPEC_in2_f2 0
5830
5831 /* Load the low double word of an extended (128-bit) format FP number */
5832 static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o)
5833 {
5834 o->in2 = load_freg(get_field(f, r2) + 2);
5835 }
5836 #define SPEC_in2_x2l SPEC_r2_f128
5837
5838 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5839 {
5840 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5841 }
5842 #define SPEC_in2_ra2 0
5843
5844 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5845 {
5846 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5847 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5848 }
5849 #define SPEC_in2_a2 0
5850
5851 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5852 {
5853 o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
5854 }
5855 #define SPEC_in2_ri2 0
5856
5857 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5858 {
5859 help_l2_shift(s, f, o, 31);
5860 }
5861 #define SPEC_in2_sh32 0
5862
5863 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5864 {
5865 help_l2_shift(s, f, o, 63);
5866 }
5867 #define SPEC_in2_sh64 0
5868
5869 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5870 {
5871 in2_a2(s, f, o);
5872 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5873 }
5874 #define SPEC_in2_m2_8u 0
5875
5876 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5877 {
5878 in2_a2(s, f, o);
5879 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5880 }
5881 #define SPEC_in2_m2_16s 0
5882
5883 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5884 {
5885 in2_a2(s, f, o);
5886 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5887 }
5888 #define SPEC_in2_m2_16u 0
5889
5890 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5891 {
5892 in2_a2(s, f, o);
5893 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5894 }
5895 #define SPEC_in2_m2_32s 0
5896
5897 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5898 {
5899 in2_a2(s, f, o);
5900 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5901 }
5902 #define SPEC_in2_m2_32u 0
5903
5904 #ifndef CONFIG_USER_ONLY
5905 static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
5906 {
5907 in2_a2(s, f, o);
5908 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5909 }
5910 #define SPEC_in2_m2_32ua 0
5911 #endif
5912
5913 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5914 {
5915 in2_a2(s, f, o);
5916 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5917 }
5918 #define SPEC_in2_m2_64 0
5919
5920 #ifndef CONFIG_USER_ONLY
5921 static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5922 {
5923 in2_a2(s, f, o);
5924 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
5925 }
5926 #define SPEC_in2_m2_64a 0
5927 #endif
5928
5929 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5930 {
5931 in2_ri2(s, f, o);
5932 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5933 }
5934 #define SPEC_in2_mri2_16u 0
5935
5936 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5937 {
5938 in2_ri2(s, f, o);
5939 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5940 }
5941 #define SPEC_in2_mri2_32s 0
5942
5943 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5944 {
5945 in2_ri2(s, f, o);
5946 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5947 }
5948 #define SPEC_in2_mri2_32u 0
5949
5950 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5951 {
5952 in2_ri2(s, f, o);
5953 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5954 }
5955 #define SPEC_in2_mri2_64 0
5956
5957 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5958 {
5959 o->in2 = tcg_const_i64(get_field(f, i2));
5960 }
5961 #define SPEC_in2_i2 0
5962
5963 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5964 {
5965 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5966 }
5967 #define SPEC_in2_i2_8u 0
5968
5969 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5970 {
5971 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5972 }
5973 #define SPEC_in2_i2_16u 0
5974
5975 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5976 {
5977 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5978 }
5979 #define SPEC_in2_i2_32u 0
5980
5981 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5982 {
5983 uint64_t i2 = (uint16_t)get_field(f, i2);
5984 o->in2 = tcg_const_i64(i2 << s->insn->data);
5985 }
5986 #define SPEC_in2_i2_16u_shl 0
5987
5988 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5989 {
5990 uint64_t i2 = (uint32_t)get_field(f, i2);
5991 o->in2 = tcg_const_i64(i2 << s->insn->data);
5992 }
5993 #define SPEC_in2_i2_32u_shl 0
5994
5995 #ifndef CONFIG_USER_ONLY
5996 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5997 {
5998 o->in2 = tcg_const_i64(s->fields->raw_insn);
5999 }
6000 #define SPEC_in2_insn 0
6001 #endif
6002
6003 /* ====================================================================== */
6004
6005 /* Find opc within the table of insns. This is formulated as a switch
6006 statement so that (1) we get compile-time notice of cut-paste errors
6007 for duplicated opcodes, and (2) the compiler generates the binary
6008 search tree, rather than us having to post-process the table. */
6009
6010 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6011 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6012
6013 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6014 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6015
6016 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6017 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6018
6019 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
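/* For illustration: a hypothetical insn-data.def entry such as
   E(0x0101, PR, ...) would expand, via the E definition above, to
   "insn_PR," inside the enum below, giving every insn a unique
   enumerator named after it. */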
6020
6021 enum DisasInsnEnum {
6022 #include "insn-data.def"
6023 };
6024
6025 #undef E
6026 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6027 .opc = OPC, \
6028 .flags = FL, \
6029 .fmt = FMT_##FT, \
6030 .fac = FAC_##FC, \
6031 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6032 .name = #NM, \
6033 .help_in1 = in1_##I1, \
6034 .help_in2 = in2_##I2, \
6035 .help_prep = prep_##P, \
6036 .help_wout = wout_##W, \
6037 .help_cout = cout_##CC, \
6038 .help_op = op_##OP, \
6039 .data = D \
6040 },
6041
6042 /* Allow 0 to be used for NULL in the table below. */
6043 #define in1_0 NULL
6044 #define in2_0 NULL
6045 #define prep_0 NULL
6046 #define wout_0 NULL
6047 #define cout_0 NULL
6048 #define op_0 NULL
6049
6050 #define SPEC_in1_0 0
6051 #define SPEC_in2_0 0
6052 #define SPEC_prep_0 0
6053 #define SPEC_wout_0 0
6054
6055 /* Give smaller names to the various facilities. */
6056 #define FAC_Z S390_FEAT_ZARCH
6057 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6058 #define FAC_DFP S390_FEAT_DFP
6059 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6060 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6061 #define FAC_EE S390_FEAT_EXECUTE_EXT
6062 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6063 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6064 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6065 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6066 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6067 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6068 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6069 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6070 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6071 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6072 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6073 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6074 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6075 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6076 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6077 #define FAC_SFLE S390_FEAT_STFLE
6078 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6079 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6080 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6081 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6082 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6083 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6084 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6085 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6086 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6087 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6088 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6089 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6090 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6091 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6092 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6093 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6094 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6095
6096 static const DisasInsn insn_info[] = {
6097 #include "insn-data.def"
6098 };
6099
6100 #undef E
6101 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6102 case OPC: return &insn_info[insn_ ## NM];
6103
6104 static const DisasInsn *lookup_opc(uint16_t opc)
6105 {
6106 switch (opc) {
6107 #include "insn-data.def"
6108 default:
6109 return NULL;
6110 }
6111 }
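/* Note that insn-data.def is included three times, each time with a
   different definition of E: once to build the DisasInsnEnum
   enumeration, once to build the insn_info[] table, and once (just
   above) to build the switch inside lookup_opc. */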
6112
6113 #undef F
6114 #undef E
6115 #undef D
6116 #undef C
6117
6118 /* Extract a field from the insn. The INSN should be left-aligned in
6119 the uint64_t so that we can more easily utilize the big-bit-endian
6120 definitions we extract from the Principles of Operation. */
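/* An illustrative case: a 4-bit field described as beg=8, size=4
   (e.g. an R1 field) occupies bits 8..11 counted from the MSB, so
   with the insn left-aligned it is fetched as (insn << 8) >> 60. */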
6121
6122 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6123 {
6124 uint32_t r, m;
6125
6126 if (f->size == 0) {
6127 return;
6128 }
6129
6130 /* Zero extract the field from the insn. */
6131 r = (insn << f->beg) >> (64 - f->size);
6132
6133 /* Sign-extend, or un-swap the field as necessary. */
6134 switch (f->type) {
6135 case 0: /* unsigned */
6136 break;
6137 case 1: /* signed */
6138 assert(f->size <= 32);
6139 m = 1u << (f->size - 1);
6140 r = (r ^ m) - m;
6141 break;
6142 case 2: /* dl+dh split, signed 20 bit. */
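/* The raw value arrives with DL (12 bits) in the upper part and
   DH (8 bits) in the low byte; sign-extend DH and reassemble the
   displacement as DH:DL. */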
6143 r = ((int8_t)r << 12) | (r >> 8);
6144 break;
6145 case 3: /* MSB stored in RXB */
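/* Insn bits 36..39 form the RXB field of vector instructions; each
   bit supplies the most-significant (fifth) bit of one of up to four
   vector register designators. Merge the right one into bit 4 of r. */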
6146 g_assert(f->size == 4);
6147 switch (f->beg) {
6148 case 8:
6149 r |= extract64(insn, 63 - 36, 1) << 4;
6150 break;
6151 case 12:
6152 r |= extract64(insn, 63 - 37, 1) << 4;
6153 break;
6154 case 16:
6155 r |= extract64(insn, 63 - 38, 1) << 4;
6156 break;
6157 case 32:
6158 r |= extract64(insn, 63 - 39, 1) << 4;
6159 break;
6160 default:
6161 g_assert_not_reached();
6162 }
6163 break;
6164 default:
6165 abort();
6166 }
6167
6168 /* Validate the "compressed" encoding we selected above, i.e. check
6169 that we haven't made two different original fields overlap. */
6170 assert(((o->presentC >> f->indexC) & 1) == 0);
6171 o->presentC |= 1 << f->indexC;
6172 o->presentO |= 1 << f->indexO;
6173
6174 o->c[f->indexC] = r;
6175 }
6176
6177 /* Look up the insn at the current PC, extracting the operands into O and
6178 returning the info struct for the insn. Returns NULL for invalid insn. */
6179
6180 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
6181 DisasFields *f)
6182 {
6183 uint64_t insn, pc = s->base.pc_next;
6184 int op, op2, ilen;
6185 const DisasInsn *info;
6186
6187 if (unlikely(s->ex_value)) {
6188 /* Drop the EX data now, so that it's clear on exception paths. */
6189 TCGv_i64 zero = tcg_const_i64(0);
6190 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6191 tcg_temp_free_i64(zero);
6192
6193 /* Extract the values saved by EXECUTE. */
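/* ex_value packs the (possibly modified) insn bytes left-aligned in
   the upper 48 bits and the insn length in the low 4 bits. */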
6194 insn = s->ex_value & 0xffffffffffff0000ull;
6195 ilen = s->ex_value & 0xf;
6196 op = insn >> 56;
6197 } else {
6198 insn = ld_code2(env, pc);
6199 op = (insn >> 8) & 0xff;
6200 ilen = get_ilen(op);
6201 switch (ilen) {
6202 case 2:
6203 insn = insn << 48;
6204 break;
6205 case 4:
6206 insn = ld_code4(env, pc) << 32;
6207 break;
6208 case 6:
6209 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
6210 break;
6211 default:
6212 g_assert_not_reached();
6213 }
6214 }
6215 s->pc_tmp = s->base.pc_next + ilen;
6216 s->ilen = ilen;
6217
6218 /* We can't actually determine the insn format until we've looked up
6219 the full insn opcode, which we can't do without locating the
6220 secondary opcode. Assume by default that OP2 is at bit 40; for
6221 those smaller insns that don't actually have a secondary opcode
6222 this will correctly result in OP2 = 0. */
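/* For example, in the 0xb2 family below op2 is the second byte of the
   left-aligned insn, hence (insn << 8) >> 56; in the RIL family it is
   the 4-bit field at bits 12..15, hence (insn << 12) >> 60. */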
6223 switch (op) {
6224 case 0x01: /* E */
6225 case 0x80: /* S */
6226 case 0x82: /* S */
6227 case 0x93: /* S */
6228 case 0xb2: /* S, RRF, RRE, IE */
6229 case 0xb3: /* RRE, RRD, RRF */
6230 case 0xb9: /* RRE, RRF */
6231 case 0xe5: /* SSE, SIL */
6232 op2 = (insn << 8) >> 56;
6233 break;
6234 case 0xa5: /* RI */
6235 case 0xa7: /* RI */
6236 case 0xc0: /* RIL */
6237 case 0xc2: /* RIL */
6238 case 0xc4: /* RIL */
6239 case 0xc6: /* RIL */
6240 case 0xc8: /* SSF */
6241 case 0xcc: /* RIL */
6242 op2 = (insn << 12) >> 60;
6243 break;
6244 case 0xc5: /* MII */
6245 case 0xc7: /* SMI */
6246 case 0xd0 ... 0xdf: /* SS */
6247 case 0xe1: /* SS */
6248 case 0xe2: /* SS */
6249 case 0xe8: /* SS */
6250 case 0xe9: /* SS */
6251 case 0xea: /* SS */
6252 case 0xee ... 0xf3: /* SS */
6253 case 0xf8 ... 0xfd: /* SS */
6254 op2 = 0;
6255 break;
6256 default:
6257 op2 = (insn << 40) >> 56;
6258 break;
6259 }
6260
6261 memset(f, 0, sizeof(*f));
6262 f->raw_insn = insn;
6263 f->op = op;
6264 f->op2 = op2;
6265
6266 /* Look up the instruction. */
6267 info = lookup_opc(op << 8 | op2);
6268
6269 /* If we found it, extract the operands. */
6270 if (info != NULL) {
6271 DisasFormat fmt = info->fmt;
6272 int i;
6273
6274 for (i = 0; i < NUM_C_FIELD; ++i) {
6275 extract_field(f, &format_info[fmt].op[i], insn);
6276 }
6277 }
6278 return info;
6279 }
6280
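/* Without the additional-floating-point (AFP) facility, only FPRs
   0, 2, 4 and 6 are available; any other FPR is an AFP register. */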
6281 static bool is_afp_reg(int reg)
6282 {
6283 return reg % 2 || reg > 6;
6284 }
6285
6286 static bool is_fp_pair(int reg)
6287 {
6288 /* 0,1,4,5,8,9,12,13: to exclude the others, check that bit 1 is clear */
6289 return !(reg & 0x2);
6290 }
6291
6292 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6293 {
6294 const DisasInsn *insn;
6295 DisasJumpType ret = DISAS_NEXT;
6296 DisasFields f;
6297 DisasOps o = {};
6298
6299 /* Search for the insn in the table. */
6300 insn = extract_insn(env, s, &f);
6301
6302 /* Not found means unimplemented/illegal opcode. */
6303 if (insn == NULL) {
6304 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6305 f.op, f.op2);
6306 gen_illegal_opcode(s);
6307 return DISAS_NORETURN;
6308 }
6309
6310 #ifndef CONFIG_USER_ONLY
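/* With program-event recording (PER) active, report every insn fetch
   to the helper so it can recognize instruction-fetch events. */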
6311 if (s->base.tb->flags & FLAG_MASK_PER) {
6312 TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6313 gen_helper_per_ifetch(cpu_env, addr);
6314 tcg_temp_free_i64(addr);
6315 }
6316 #endif
6317
6318 /* process flags */
6319 if (insn->flags) {
6320 /* privileged instruction */
6321 if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6322 gen_program_exception(s, PGM_PRIVILEGED);
6323 return DISAS_NORETURN;
6324 }
6325
6326 /* if AFP is not enabled, AFP instructions and registers are forbidden */
6327 if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6328 uint8_t dxc = 0;
6329
6330 if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
6331 dxc = 1;
6332 }
6333 if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
6334 dxc = 1;
6335 }
6336 if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
6337 dxc = 1;
6338 }
6339 if (insn->flags & IF_BFP) {
6340 dxc = 2;
6341 }
6342 if (insn->flags & IF_DFP) {
6343 dxc = 3;
6344 }
6345 if (insn->flags & IF_VEC) {
6346 dxc = 0xfe;
6347 }
6348 if (dxc) {
6349 gen_data_exception(dxc);
6350 return DISAS_NORETURN;
6351 }
6352 }
6353
6354 /* if vector instructions are not enabled, executing them is forbidden */
6355 if (insn->flags & IF_VEC) {
6356 if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
6357 gen_data_exception(0xfe);
6358 return DISAS_NORETURN;
6359 }
6360 }
6361 }
6362
6363 /* Check for insn specification exceptions. */
6364 if (insn->spec) {
6365 if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
6366 (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
6367 (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
6368 (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
6369 (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
6370 gen_program_exception(s, PGM_SPECIFICATION);
6371 return DISAS_NORETURN;
6372 }
6373 }
6374
6375 /* Set up the structures we use to communicate with the helpers. */
6376 s->insn = insn;
6377 s->fields = &f;
6378
6379 /* Implement the instruction. */
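/* The help_* callbacks come straight from the insn's table entry:
   in1/in2 load the operands, prep sets up the output, op performs the
   operation, wout writes the result back and cout computes the cc. */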
6380 if (insn->help_in1) {
6381 insn->help_in1(s, &f, &o);
6382 }
6383 if (insn->help_in2) {
6384 insn->help_in2(s, &f, &o);
6385 }
6386 if (insn->help_prep) {
6387 insn->help_prep(s, &f, &o);
6388 }
6389 if (insn->help_op) {
6390 ret = insn->help_op(s, &o);
6391 }
6392 if (ret != DISAS_NORETURN) {
6393 if (insn->help_wout) {
6394 insn->help_wout(s, &f, &o);
6395 }
6396 if (insn->help_cout) {
6397 insn->help_cout(s, &o);
6398 }
6399 }
6400
6401 /* Free any temporaries created by the helpers. */
6402 if (o.out && !o.g_out) {
6403 tcg_temp_free_i64(o.out);
6404 }
6405 if (o.out2 && !o.g_out2) {
6406 tcg_temp_free_i64(o.out2);
6407 }
6408 if (o.in1 && !o.g_in1) {
6409 tcg_temp_free_i64(o.in1);
6410 }
6411 if (o.in2 && !o.g_in2) {
6412 tcg_temp_free_i64(o.in2);
6413 }
6414 if (o.addr1) {
6415 tcg_temp_free_i64(o.addr1);
6416 }
6417
6418 #ifndef CONFIG_USER_ONLY
6419 if (s->base.tb->flags & FLAG_MASK_PER) {
6420 /* An exception might be triggered; save the PSW if not already done. */
6421 if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
6422 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6423 }
6424
6425 /* Call the helper to check for a possible PER exception. */
6426 gen_helper_per_check_exception(cpu_env);
6427 }
6428 #endif
6429
6430 /* Advance to the next instruction. */
6431 s->base.pc_next = s->pc_tmp;
6432 return ret;
6433 }
6434
6435 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6436 {
6437 DisasContext *dc = container_of(dcbase, DisasContext, base);
6438
6439 /* 31-bit mode: only the low 31 address bits are significant */
6440 if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6441 dc->base.pc_first &= 0x7fffffff;
6442 dc->base.pc_next = dc->base.pc_first;
6443 }
6444
6445 dc->cc_op = CC_OP_DYNAMIC;
6446 dc->ex_value = dc->base.tb->cs_base;
6447 dc->do_debug = dc->base.singlestep_enabled;
6448 }
6449
6450 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6451 {
6452 }
6453
6454 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6455 {
6456 DisasContext *dc = container_of(dcbase, DisasContext, base);
6457
6458 tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
6459 }
6460
6461 static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
6462 const CPUBreakpoint *bp)
6463 {
6464 DisasContext *dc = container_of(dcbase, DisasContext, base);
6465
6466 dc->base.is_jmp = DISAS_PC_STALE;
6467 dc->do_debug = true;
6468 /* The address covered by the breakpoint must be included in
6469 [tb->pc, tb->pc + tb->size) in order for it to be
6470 properly cleared -- thus we increment the PC here so that
6471 the logic setting tb->size does the right thing. */
6472 dc->base.pc_next += 2;
6473 return true;
6474 }
6475
6476 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6477 {
6478 CPUS390XState *env = cs->env_ptr;
6479 DisasContext *dc = container_of(dcbase, DisasContext, base);
6480
6481 dc->base.is_jmp = translate_one(env, dc);
6482 if (dc->base.is_jmp == DISAS_NEXT) {
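/* Stop translating when the next insn would cross a page boundary,
   or unconditionally when interpreting an EXECUTE target. */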
6483 uint64_t page_start;
6484
6485 page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6486 if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6487 dc->base.is_jmp = DISAS_TOO_MANY;
6488 }
6489 }
6490 }
6491
6492 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6493 {
6494 DisasContext *dc = container_of(dcbase, DisasContext, base);
6495
6496 switch (dc->base.is_jmp) {
6497 case DISAS_GOTO_TB:
6498 case DISAS_NORETURN:
6499 break;
6500 case DISAS_TOO_MANY:
6501 case DISAS_PC_STALE:
6502 case DISAS_PC_STALE_NOCHAIN:
6503 update_psw_addr(dc);
6504 /* FALLTHRU */
6505 case DISAS_PC_UPDATED:
6506 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6507 cc op type is in env */
6508 update_cc_op(dc);
6509 /* FALLTHRU */
6510 case DISAS_PC_CC_UPDATED:
6511 /* Exit the TB, either by raising a debug exception or by return. */
6512 if (dc->do_debug) {
6513 gen_exception(EXCP_DEBUG);
6514 } else if (use_exit_tb(dc) ||
6515 dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
6516 tcg_gen_exit_tb(NULL, 0);
6517 } else {
6518 tcg_gen_lookup_and_goto_ptr();
6519 }
6520 break;
6521 default:
6522 g_assert_not_reached();
6523 }
6524 }
6525
6526 static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6527 {
6528 DisasContext *dc = container_of(dcbase, DisasContext, base);
6529
6530 if (unlikely(dc->ex_value)) {
6531 /* ??? Unfortunately log_target_disas can't use host memory. */
6532 qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
6533 } else {
6534 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6535 log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6536 }
6537 }
6538
6539 static const TranslatorOps s390x_tr_ops = {
6540 .init_disas_context = s390x_tr_init_disas_context,
6541 .tb_start = s390x_tr_tb_start,
6542 .insn_start = s390x_tr_insn_start,
6543 .breakpoint_check = s390x_tr_breakpoint_check,
6544 .translate_insn = s390x_tr_translate_insn,
6545 .tb_stop = s390x_tr_tb_stop,
6546 .disas_log = s390x_tr_disas_log,
6547 };
6548
6549 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
6550 {
6551 DisasContext dc;
6552
6553 translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
6554 }
6555
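/* data[] holds what tcg_gen_insn_start() recorded in
   s390x_tr_insn_start(): data[0] is the insn's PSW address and
   data[1] the cc_op at that point. */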
6556 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6557 target_ulong *data)
6558 {
6559 int cc_op = data[1];
6560 env->psw.addr = data[0];
6561 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6562 env->cc_op = cc_op;
6563 }
6564 }