/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

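/*
 * Compute the link information stored by the branch-and-link family:
 * the full address in 64-bit mode; otherwise only the low 32 bits of
 * the output register are replaced, with the leftmost bit of that word
 * forced on in 31-bit mode.
 */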
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

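/*
 * Allocate the TCG globals that mirror fields of CPUS390XState.  Called
 * once at startup, before any code is translated.
 */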
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0].d);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, TCGMemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] holds the lowest 8 bytes and vregs[n][1] the highest
     * 8 bytes of the 16-byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16-byte elements, the two 8-byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care.  For operations like addition,
     * the two 8-byte elements have to be loaded separately.  Let's force all
     * 16-byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

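/*
 * Flush the locally tracked cc_op out to env->cc_op.  Nothing needs to
 * be written when it is dynamic (already in env) or static (env->cc_op
 * already holds the computed CC value).
 */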
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

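/*
 * Map the translation flags to an MMU index: real addressing when DAT
 * is disabled, otherwise the index matching the PSW address-space
 * control (primary, secondary or home).
 */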
static int get_mem_index(DisasContext *s)
{
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

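/*
 * Add an immediate to SRC and wrap the result to the current addressing
 * mode: unchanged in 64-bit mode, 31 bits in extended mode, 24 bits
 * otherwise.
 */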
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

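/*
 * True if cc_src/cc_dst/cc_vr hold live inputs of a deferred CC
 * computation.  The constant ops (CC_OP_CONST0..CC_OP_CONST3, values
 * 0..3), CC_OP_STATIC and CC_OP_DYNAMIC carry no data in those globals,
 * so they may be discarded before being overwritten.
 */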
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

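/*
 * True when the TB must exit to the main loop rather than chain
 * directly to another TB: single-stepping, an instruction that may
 * have touched I/O, or active PER tracing.
 */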
static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

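/*
 * Release the temporaries held in a DisasCompare; operands flagged
 * g1/g2 are TCG globals and must not be freed.
 */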
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

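/*
 * Compute the shift-count operand into in2: the displacement alone when
 * no base register is given, otherwise a full address computation.  The
 * result is masked to the bits significant for the shift.
 */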
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

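/*
 * Emit an unconditional branch to DEST: a branch-to-next folds into a
 * fall-through, an in-page target chains via goto_tb, and anything else
 * just updates psw_addr and exits the TB.
 */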
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

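/*
 * Emit a conditional branch described by C.  For immediate forms the
 * target is base.pc_next + 2 * IMM (IMM is in halfwords); otherwise it
 * is the address in CDEST.  C is consumed via free_compare() on every
 * path out.
 */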
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}

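/*
 * Add-to-storage (ASI and friends).  When the facility group behind
 * STFLE bit 45 (which includes interlocked-access facility 1) is
 * available, the addition is performed as an atomic fetch-add in
 * memory; otherwise as a plain load / add / store sequence.
 */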
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

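/*
 * Build the BAL/BALR link information for 24-bit mode: the instruction
 * length code, condition code and program mask packed into the high
 * byte of the low word, above the 24-bit return address.  In 31/64-bit
 * mode this reduces to pc_to_link_info().
 */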
1547 static void save_link_info(DisasContext *s, DisasOps *o)
1548 {
1549 TCGv_i64 t;
1550
1551 if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
1552 pc_to_link_info(o->out, s, s->pc_tmp);
1553 return;
1554 }
1555 gen_op_calc_cc(s);
1556 tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
1557 tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
1558 t = tcg_temp_new_i64();
1559 tcg_gen_shri_i64(t, psw_mask, 16);
1560 tcg_gen_andi_i64(t, t, 0x0f000000);
1561 tcg_gen_or_i64(o->out, o->out, t);
1562 tcg_gen_extu_i32_i64(t, cc_op);
1563 tcg_gen_shli_i64(t, t, 28);
1564 tcg_gen_or_i64(o->out, o->out, t);
1565 tcg_temp_free_i64(t);
1566 }
1567
1568 static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1569 {
1570 save_link_info(s, o);
1571 if (o->in2) {
1572 tcg_gen_mov_i64(psw_addr, o->in2);
1573 per_branch(s, false);
1574 return DISAS_PC_UPDATED;
1575 } else {
1576 return DISAS_NEXT;
1577 }
1578 }
1579
1580 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1581 {
1582 pc_to_link_info(o->out, s, s->pc_tmp);
1583 return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
1584 }
1585
1586 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1587 {
1588 int m1 = get_field(s->fields, m1);
1589 bool is_imm = have_field(s->fields, i2);
1590 int imm = is_imm ? get_field(s->fields, i2) : 0;
1591 DisasCompare c;
1592
1593 /* BCR with R2 = 0 causes no branching */
1594 if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1595 if (m1 == 14) {
1596 /* Perform serialization */
1597 /* FIXME: check for fast-BCR-serialization facility */
1598 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1599 }
1600 if (m1 == 15) {
1601 /* Perform serialization */
1602 /* FIXME: perform checkpoint-synchronisation */
1603 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1604 }
1605 return DISAS_NEXT;
1606 }
1607
1608 disas_jcc(s, &c, m1);
1609 return help_branch(s, &c, is_imm, imm, o->in2);
1610 }
1611
1612 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1613 {
1614 int r1 = get_field(s->fields, r1);
1615 bool is_imm = have_field(s->fields, i2);
1616 int imm = is_imm ? get_field(s->fields, i2) : 0;
1617 DisasCompare c;
1618 TCGv_i64 t;
1619
1620 c.cond = TCG_COND_NE;
1621 c.is_64 = false;
1622 c.g1 = false;
1623 c.g2 = false;
1624
1625 t = tcg_temp_new_i64();
1626 tcg_gen_subi_i64(t, regs[r1], 1);
1627 store_reg32_i64(r1, t);
1628 c.u.s32.a = tcg_temp_new_i32();
1629 c.u.s32.b = tcg_const_i32(0);
1630 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1631 tcg_temp_free_i64(t);
1632
1633 return help_branch(s, &c, is_imm, imm, o->in2);
1634 }
1635
1636 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1637 {
1638 int r1 = get_field(s->fields, r1);
1639 int imm = get_field(s->fields, i2);
1640 DisasCompare c;
1641 TCGv_i64 t;
1642
1643 c.cond = TCG_COND_NE;
1644 c.is_64 = false;
1645 c.g1 = false;
1646 c.g2 = false;
1647
1648 t = tcg_temp_new_i64();
1649 tcg_gen_shri_i64(t, regs[r1], 32);
1650 tcg_gen_subi_i64(t, t, 1);
1651 store_reg32h_i64(r1, t);
1652 c.u.s32.a = tcg_temp_new_i32();
1653 c.u.s32.b = tcg_const_i32(0);
1654 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1655 tcg_temp_free_i64(t);
1656
1657 return help_branch(s, &c, 1, imm, o->in2);
1658 }
1659
1660 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1661 {
1662 int r1 = get_field(s->fields, r1);
1663 bool is_imm = have_field(s->fields, i2);
1664 int imm = is_imm ? get_field(s->fields, i2) : 0;
1665 DisasCompare c;
1666
1667 c.cond = TCG_COND_NE;
1668 c.is_64 = true;
1669 c.g1 = true;
1670 c.g2 = false;
1671
1672 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1673 c.u.s64.a = regs[r1];
1674 c.u.s64.b = tcg_const_i64(0);
1675
1676 return help_branch(s, &c, is_imm, imm, o->in2);
1677 }
1678
1679 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1680 {
1681 int r1 = get_field(s->fields, r1);
1682 int r3 = get_field(s->fields, r3);
1683 bool is_imm = have_field(s->fields, i2);
1684 int imm = is_imm ? get_field(s->fields, i2) : 0;
1685 DisasCompare c;
1686 TCGv_i64 t;
1687
1688 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1689 c.is_64 = false;
1690 c.g1 = false;
1691 c.g2 = false;
1692
1693 t = tcg_temp_new_i64();
1694 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1695 c.u.s32.a = tcg_temp_new_i32();
1696 c.u.s32.b = tcg_temp_new_i32();
1697 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1698 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1699 store_reg32_i64(r1, t);
1700 tcg_temp_free_i64(t);
1701
1702 return help_branch(s, &c, is_imm, imm, o->in2);
1703 }
1704
1705 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1706 {
1707 int r1 = get_field(s->fields, r1);
1708 int r3 = get_field(s->fields, r3);
1709 bool is_imm = have_field(s->fields, i2);
1710 int imm = is_imm ? get_field(s->fields, i2) : 0;
1711 DisasCompare c;
1712
1713 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1714 c.is_64 = true;
1715
1716 if (r1 == (r3 | 1)) {
1717 c.u.s64.b = load_reg(r3 | 1);
1718 c.g2 = false;
1719 } else {
1720 c.u.s64.b = regs[r3 | 1];
1721 c.g2 = true;
1722 }
1723
1724 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1725 c.u.s64.a = regs[r1];
1726 c.g1 = true;
1727
1728 return help_branch(s, &c, is_imm, imm, o->in2);
1729 }
1730
1731 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1732 {
1733 int imm, m3 = get_field(s->fields, m3);
1734 bool is_imm;
1735 DisasCompare c;
1736
1737 c.cond = ltgt_cond[m3];
1738 if (s->insn->data) {
1739 c.cond = tcg_unsigned_cond(c.cond);
1740 }
1741 c.is_64 = c.g1 = c.g2 = true;
1742 c.u.s64.a = o->in1;
1743 c.u.s64.b = o->in2;
1744
1745 is_imm = have_field(s->fields, i4);
1746 if (is_imm) {
1747 imm = get_field(s->fields, i4);
1748 } else {
1749 imm = 0;
1750 o->out = get_address(s, 0, get_field(s->fields, b4),
1751 get_field(s->fields, d4));
1752 }
1753
1754 return help_branch(s, &c, is_imm, imm, o->out);
1755 }
1756
1757 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1758 {
1759 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1760 set_cc_static(s);
1761 return DISAS_NEXT;
1762 }
1763
1764 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1765 {
1766 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1767 set_cc_static(s);
1768 return DISAS_NEXT;
1769 }
1770
1771 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1772 {
1773 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1774 set_cc_static(s);
1775 return DISAS_NEXT;
1776 }
1777
1778 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1779 {
1780 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1781 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1782 tcg_temp_free_i32(m3);
1783 gen_set_cc_nz_f32(s, o->in2);
1784 return DISAS_NEXT;
1785 }
1786
1787 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1788 {
1789 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1790 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1791 tcg_temp_free_i32(m3);
1792 gen_set_cc_nz_f64(s, o->in2);
1793 return DISAS_NEXT;
1794 }
1795
1796 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1797 {
1798 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1799 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1800 tcg_temp_free_i32(m3);
1801 gen_set_cc_nz_f128(s, o->in1, o->in2);
1802 return DISAS_NEXT;
1803 }
1804
1805 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1806 {
1807 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1808 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1809 tcg_temp_free_i32(m3);
1810 gen_set_cc_nz_f32(s, o->in2);
1811 return DISAS_NEXT;
1812 }
1813
1814 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1815 {
1816 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1817 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1818 tcg_temp_free_i32(m3);
1819 gen_set_cc_nz_f64(s, o->in2);
1820 return DISAS_NEXT;
1821 }
1822
1823 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1824 {
1825 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1826 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1827 tcg_temp_free_i32(m3);
1828 gen_set_cc_nz_f128(s, o->in1, o->in2);
1829 return DISAS_NEXT;
1830 }
1831
1832 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1833 {
1834 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1835 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1836 tcg_temp_free_i32(m3);
1837 gen_set_cc_nz_f32(s, o->in2);
1838 return DISAS_NEXT;
1839 }
1840
1841 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1842 {
1843 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1844 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1845 tcg_temp_free_i32(m3);
1846 gen_set_cc_nz_f64(s, o->in2);
1847 return DISAS_NEXT;
1848 }
1849
1850 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1851 {
1852 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1853 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1854 tcg_temp_free_i32(m3);
1855 gen_set_cc_nz_f128(s, o->in1, o->in2);
1856 return DISAS_NEXT;
1857 }
1858
1859 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1860 {
1861 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1862 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1863 tcg_temp_free_i32(m3);
1864 gen_set_cc_nz_f32(s, o->in2);
1865 return DISAS_NEXT;
1866 }
1867
1868 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1869 {
1870 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1871 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1872 tcg_temp_free_i32(m3);
1873 gen_set_cc_nz_f64(s, o->in2);
1874 return DISAS_NEXT;
1875 }
1876
1877 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1878 {
1879 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1880 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1881 tcg_temp_free_i32(m3);
1882 gen_set_cc_nz_f128(s, o->in1, o->in2);
1883 return DISAS_NEXT;
1884 }
1885
1886 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1887 {
1888 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1889 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1890 tcg_temp_free_i32(m3);
1891 return DISAS_NEXT;
1892 }
1893
1894 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1895 {
1896 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1897 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1898 tcg_temp_free_i32(m3);
1899 return DISAS_NEXT;
1900 }
1901
1902 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1903 {
1904 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1905 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1906 tcg_temp_free_i32(m3);
1907 return_low128(o->out2);
1908 return DISAS_NEXT;
1909 }
1910
1911 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1912 {
1913 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1914 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1915 tcg_temp_free_i32(m3);
1916 return DISAS_NEXT;
1917 }
1918
1919 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1920 {
1921 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1922 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1923 tcg_temp_free_i32(m3);
1924 return DISAS_NEXT;
1925 }
1926
1927 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
1928 {
1929 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1930 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1931 tcg_temp_free_i32(m3);
1932 return_low128(o->out2);
1933 return DISAS_NEXT;
1934 }
1935
1936 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1937 {
1938 int r2 = get_field(s->fields, r2);
1939 TCGv_i64 len = tcg_temp_new_i64();
1940
1941 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1942 set_cc_static(s);
1943 return_low128(o->out);
1944
1945 tcg_gen_add_i64(regs[r2], regs[r2], len);
1946 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1947 tcg_temp_free_i64(len);
1948
1949 return DISAS_NEXT;
1950 }
1951
1952 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
1953 {
1954 int l = get_field(s->fields, l1);
1955 TCGv_i32 vl;
1956
1957 switch (l + 1) {
1958 case 1:
1959 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1960 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1961 break;
1962 case 2:
1963 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1964 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1965 break;
1966 case 4:
1967 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1968 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1969 break;
1970 case 8:
1971 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1972 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1973 break;
1974 default:
1975 vl = tcg_const_i32(l);
1976 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1977 tcg_temp_free_i32(vl);
1978 set_cc_static(s);
1979 return DISAS_NEXT;
1980 }
1981 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1982 return DISAS_NEXT;
1983 }
1984
1985 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
1986 {
1987 int r1 = get_field(s->fields, r1);
1988 int r2 = get_field(s->fields, r2);
1989 TCGv_i32 t1, t2;
1990
1991 /* r1 and r2 must be even. */
1992 if (r1 & 1 || r2 & 1) {
1993 gen_program_exception(s, PGM_SPECIFICATION);
1994 return DISAS_NORETURN;
1995 }
1996
1997 t1 = tcg_const_i32(r1);
1998 t2 = tcg_const_i32(r2);
1999 gen_helper_clcl(cc_op, cpu_env, t1, t2);
2000 tcg_temp_free_i32(t1);
2001 tcg_temp_free_i32(t2);
2002 set_cc_static(s);
2003 return DISAS_NEXT;
2004 }
2005
2006 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2007 {
2008 int r1 = get_field(s->fields, r1);
2009 int r3 = get_field(s->fields, r3);
2010 TCGv_i32 t1, t3;
2011
2012 /* r1 and r3 must be even. */
2013 if (r1 & 1 || r3 & 1) {
2014 gen_program_exception(s, PGM_SPECIFICATION);
2015 return DISAS_NORETURN;
2016 }
2017
2018 t1 = tcg_const_i32(r1);
2019 t3 = tcg_const_i32(r3);
2020 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2021 tcg_temp_free_i32(t1);
2022 tcg_temp_free_i32(t3);
2023 set_cc_static(s);
2024 return DISAS_NEXT;
2025 }
2026
2027 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2028 {
2029 int r1 = get_field(s->fields, r1);
2030 int r3 = get_field(s->fields, r3);
2031 TCGv_i32 t1, t3;
2032
2033 /* r1 and r3 must be even. */
2034 if (r1 & 1 || r3 & 1) {
2035 gen_program_exception(s, PGM_SPECIFICATION);
2036 return DISAS_NORETURN;
2037 }
2038
2039 t1 = tcg_const_i32(r1);
2040 t3 = tcg_const_i32(r3);
2041 gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2042 tcg_temp_free_i32(t1);
2043 tcg_temp_free_i32(t3);
2044 set_cc_static(s);
2045 return DISAS_NEXT;
2046 }
2047
2048 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2049 {
2050 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2051 TCGv_i32 t1 = tcg_temp_new_i32();
2052 tcg_gen_extrl_i64_i32(t1, o->in1);
2053 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2054 set_cc_static(s);
2055 tcg_temp_free_i32(t1);
2056 tcg_temp_free_i32(m3);
2057 return DISAS_NEXT;
2058 }
2059
2060 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2061 {
2062 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2063 set_cc_static(s);
2064 return_low128(o->in2);
2065 return DISAS_NEXT;
2066 }
2067
2068 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2069 {
2070 TCGv_i64 t = tcg_temp_new_i64();
2071 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2072 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2073 tcg_gen_or_i64(o->out, o->out, t);
2074 tcg_temp_free_i64(t);
2075 return DISAS_NEXT;
2076 }
2077
2078 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2079 {
2080 int d2 = get_field(s->fields, d2);
2081 int b2 = get_field(s->fields, b2);
2082 TCGv_i64 addr, cc;
2083
2084 /* Note that in1 = R3 (new value) and
2085 in2 = (zero-extended) R1 (expected value). */
2086
2087 addr = get_address(s, 0, b2, d2);
2088 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2089 get_mem_index(s), s->insn->data | MO_ALIGN);
2090 tcg_temp_free_i64(addr);
2091
2092 /* Are the memory and expected values (un)equal? Note that this setcond
2093 produces the output CC value, thus the NE sense of the test. */
2094 cc = tcg_temp_new_i64();
2095 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2096 tcg_gen_extrl_i64_i32(cc_op, cc);
2097 tcg_temp_free_i64(cc);
2098 set_cc_static(s);
2099
2100 return DISAS_NEXT;
2101 }
2102
2103 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2104 {
2105 int r1 = get_field(s->fields, r1);
2106 int r3 = get_field(s->fields, r3);
2107 int d2 = get_field(s->fields, d2);
2108 int b2 = get_field(s->fields, b2);
2109 DisasJumpType ret = DISAS_NEXT;
2110 TCGv_i64 addr;
2111 TCGv_i32 t_r1, t_r3;
2112
2113 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2114 addr = get_address(s, 0, b2, d2);
2115 t_r1 = tcg_const_i32(r1);
2116 t_r3 = tcg_const_i32(r3);
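/* Without CF_PARALLEL the helper may perform the compare-double-and-swap
   non-atomically; in a parallel context we need a host 128-bit cmpxchg,
   and failing that we stop the world and re-execute serially. */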
2117 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2118 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2119 } else if (HAVE_CMPXCHG128) {
2120 gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2121 } else {
2122 gen_helper_exit_atomic(cpu_env);
2123 ret = DISAS_NORETURN;
2124 }
2125 tcg_temp_free_i64(addr);
2126 tcg_temp_free_i32(t_r1);
2127 tcg_temp_free_i32(t_r3);
2128
2129 set_cc_static(s);
2130 return ret;
2131 }
2132
2133 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2134 {
2135 int r3 = get_field(s->fields, r3);
2136 TCGv_i32 t_r3 = tcg_const_i32(r3);
2137
2138 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2139 gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2140 } else {
2141 gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2142 }
2143 tcg_temp_free_i32(t_r3);
2144
2145 set_cc_static(s);
2146 return DISAS_NEXT;
2147 }
2148
2149 #ifndef CONFIG_USER_ONLY
2150 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2151 {
2152 TCGMemOp mop = s->insn->data;
2153 TCGv_i64 addr, old, cc;
2154 TCGLabel *lab = gen_new_label();
2155
2156 /* Note that in1 = R1 (zero-extended expected value),
2157 out = R1 (original reg), out2 = R1+1 (new value). */
2158
2159 addr = tcg_temp_new_i64();
2160 old = tcg_temp_new_i64();
2161 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2162 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2163 get_mem_index(s), mop | MO_ALIGN);
2164 tcg_temp_free_i64(addr);
2165
2166 /* Are the memory and expected values (un)equal? */
2167 cc = tcg_temp_new_i64();
2168 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2169 tcg_gen_extrl_i64_i32(cc_op, cc);
2170
2171 /* Write back the output now, so that it happens before the
2172 following branch, so that we don't need local temps. */
2173 if ((mop & MO_SIZE) == MO_32) {
2174 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2175 } else {
2176 tcg_gen_mov_i64(o->out, old);
2177 }
2178 tcg_temp_free_i64(old);
2179
2180 /* If the comparison was equal, and the LSB of R2 was set,
2181 then we need to flush the TLB (for all cpus). */
2182 tcg_gen_xori_i64(cc, cc, 1);
2183 tcg_gen_and_i64(cc, cc, o->in2);
2184 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2185 tcg_temp_free_i64(cc);
2186
2187 gen_helper_purge(cpu_env);
2188 gen_set_label(lab);
2189
2190 return DISAS_NEXT;
2191 }
2192 #endif
2193
2194 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2195 {
2196 TCGv_i64 t1 = tcg_temp_new_i64();
2197 TCGv_i32 t2 = tcg_temp_new_i32();
2198 tcg_gen_extrl_i64_i32(t2, o->in1);
2199 gen_helper_cvd(t1, t2);
2200 tcg_temp_free_i32(t2);
2201 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2202 tcg_temp_free_i64(t1);
2203 return DISAS_NEXT;
2204 }
2205
2206 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2207 {
2208 int m3 = get_field(s->fields, m3);
2209 TCGLabel *lab = gen_new_label();
2210 TCGCond c;
2211
2212 c = tcg_invert_cond(ltgt_cond[m3]);
2213 if (s->insn->data) {
2214 c = tcg_unsigned_cond(c);
2215 }
2216 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2217
2218 /* Trap. */
2219 gen_trap(s);
2220
2221 gen_set_label(lab);
2222 return DISAS_NEXT;
2223 }
2224
2225 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2226 {
2227 int m3 = get_field(s->fields, m3);
2228 int r1 = get_field(s->fields, r1);
2229 int r2 = get_field(s->fields, r2);
2230 TCGv_i32 tr1, tr2, chk;
2231
2232 /* R1 and R2 must both be even. */
2233 if ((r1 | r2) & 1) {
2234 gen_program_exception(s, PGM_SPECIFICATION);
2235 return DISAS_NORETURN;
2236 }
2237 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2238 m3 = 0;
2239 }
2240
2241 tr1 = tcg_const_i32(r1);
2242 tr2 = tcg_const_i32(r2);
2243 chk = tcg_const_i32(m3);
2244
2245 switch (s->insn->data) {
2246 case 12:
2247 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2248 break;
2249 case 14:
2250 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2251 break;
2252 case 21:
2253 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2254 break;
2255 case 24:
2256 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2257 break;
2258 case 41:
2259 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2260 break;
2261 case 42:
2262 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2263 break;
2264 default:
2265 g_assert_not_reached();
2266 }
2267
2268 tcg_temp_free_i32(tr1);
2269 tcg_temp_free_i32(tr2);
2270 tcg_temp_free_i32(chk);
2271 set_cc_static(s);
2272 return DISAS_NEXT;
2273 }
2274
2275 #ifndef CONFIG_USER_ONLY
2276 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2277 {
2278 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2279 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2280 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2281
2282 gen_helper_diag(cpu_env, r1, r3, func_code);
2283
2284 tcg_temp_free_i32(func_code);
2285 tcg_temp_free_i32(r3);
2286 tcg_temp_free_i32(r1);
2287 return DISAS_NEXT;
2288 }
2289 #endif
2290
2291 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2292 {
2293 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2294 return_low128(o->out);
2295 return DISAS_NEXT;
2296 }
2297
2298 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2299 {
2300 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2301 return_low128(o->out);
2302 return DISAS_NEXT;
2303 }
2304
2305 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2306 {
2307 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2308 return_low128(o->out);
2309 return DISAS_NEXT;
2310 }
2311
2312 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2313 {
2314 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2315 return_low128(o->out);
2316 return DISAS_NEXT;
2317 }
2318
2319 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2320 {
2321 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2322 return DISAS_NEXT;
2323 }
2324
2325 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2326 {
2327 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2328 return DISAS_NEXT;
2329 }
2330
2331 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2332 {
2333 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2334 return_low128(o->out2);
2335 return DISAS_NEXT;
2336 }
2337
2338 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2339 {
2340 int r2 = get_field(s->fields, r2);
2341 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2342 return DISAS_NEXT;
2343 }
2344
2345 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2346 {
2347 /* No cache information provided. */
2348 tcg_gen_movi_i64(o->out, -1);
2349 return DISAS_NEXT;
2350 }
2351
2352 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2353 {
2354 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2355 return DISAS_NEXT;
2356 }
2357
2358 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2359 {
2360 int r1 = get_field(s->fields, r1);
2361 int r2 = get_field(s->fields, r2);
2362 TCGv_i64 t = tcg_temp_new_i64();
2363
2364 /* Note the "subsequently" in the PoO, which implies a defined result
2365 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2366 tcg_gen_shri_i64(t, psw_mask, 32);
2367 store_reg32_i64(r1, t);
2368 if (r2 != 0) {
2369 store_reg32_i64(r2, psw_mask);
2370 }
2371
2372 tcg_temp_free_i64(t);
2373 return DISAS_NEXT;
2374 }
2375
2376 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2377 {
2378 int r1 = get_field(s->fields, r1);
2379 TCGv_i32 ilen;
2380 TCGv_i64 v1;
2381
2382 /* Nested EXECUTE is not allowed. */
2383 if (unlikely(s->ex_value)) {
2384 gen_program_exception(s, PGM_EXECUTE);
2385 return DISAS_NORETURN;
2386 }
2387
2388 update_psw_addr(s);
2389 update_cc_op(s);
2390
2391 if (r1 == 0) {
2392 v1 = tcg_const_i64(0);
2393 } else {
2394 v1 = regs[r1];
2395 }
2396
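/* The helper forms the target instruction (its second byte modified by
   v1) and stages it in the cpu state as ex_value; the next translation
   step executes it in place of a normal fetch, so the TB ends here with
   PC and CC already up to date. */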
2397 ilen = tcg_const_i32(s->ilen);
2398 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2399 tcg_temp_free_i32(ilen);
2400
2401 if (r1 == 0) {
2402 tcg_temp_free_i64(v1);
2403 }
2404
2405 return DISAS_PC_CC_UPDATED;
2406 }
2407
2408 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2409 {
2410 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2411 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2412 tcg_temp_free_i32(m3);
2413 return DISAS_NEXT;
2414 }
2415
2416 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2417 {
2418 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2419 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2420 tcg_temp_free_i32(m3);
2421 return DISAS_NEXT;
2422 }
2423
2424 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2425 {
2426 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2427 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2428 return_low128(o->out2);
2429 tcg_temp_free_i32(m3);
2430 return DISAS_NEXT;
2431 }
2432
2433 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2434 {
2435 /* We'll use the original input for cc computation, since we get to
2436 compare that against 0, which ought to be better than comparing
2437 the real output against 64. It also lets cc_dst be a convenient
2438 temporary during our computation. */
2439 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2440
2441 /* R1 = IN ? CLZ(IN) : 64. */
2442 tcg_gen_clzi_i64(o->out, o->in2, 64);
2443
2444 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2445 value by 64, which is undefined. But since the shift is 64 iff the
2446 input is zero, we still get the correct result after and'ing. */
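/* E.g. IN = 0x0000500000000000: R1 = clz = 17, the found bit is
   1ull << 46, and R1+1 becomes 0x0000100000000000. */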
2447 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2448 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2449 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2450 return DISAS_NEXT;
2451 }
2452
2453 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2454 {
2455 int m3 = get_field(s->fields, m3);
2456 int pos, len, base = s->insn->data;
2457 TCGv_i64 tmp = tcg_temp_new_i64();
2458 uint64_t ccm;
2459
2460 switch (m3) {
2461 case 0xf:
2462 /* Effectively a 32-bit load. */
2463 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2464 len = 32;
2465 goto one_insert;
2466
2467 case 0xc:
2468 case 0x6:
2469 case 0x3:
2470 /* Effectively a 16-bit load. */
2471 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2472 len = 16;
2473 goto one_insert;
2474
2475 case 0x8:
2476 case 0x4:
2477 case 0x2:
2478 case 0x1:
2479 /* Effectively an 8-bit load. */
2480 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2481 len = 8;
2482 goto one_insert;
2483
2484 one_insert:
2485 pos = base + ctz32(m3) * 8;
2486 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2487 ccm = ((1ull << len) - 1) << pos;
2488 break;
2489
2490 default:
2491 /* This is going to be a sequence of loads and inserts. */
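/* E.g. ICM (base 0) with m3 = 0xa loads two bytes from successive
   addresses into bits 31-24 and 15-8; ccm ends up as 0xff00ff00. */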
2492 pos = base + 32 - 8;
2493 ccm = 0;
2494 while (m3) {
2495 if (m3 & 0x8) {
2496 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2497 tcg_gen_addi_i64(o->in2, o->in2, 1);
2498 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2499 ccm |= 0xffull << pos;   /* pos can reach 56 for ICMH */
2500 }
2501 m3 = (m3 << 1) & 0xf;
2502 pos -= 8;
2503 }
2504 break;
2505 }
2506
2507 tcg_gen_movi_i64(tmp, ccm);
2508 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2509 tcg_temp_free_i64(tmp);
2510 return DISAS_NEXT;
2511 }
2512
2513 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2514 {
2515 int shift = s->insn->data & 0xff;
2516 int size = s->insn->data >> 8;
2517 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2518 return DISAS_NEXT;
2519 }
2520
2521 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2522 {
2523 TCGv_i64 t1, t2;
2524
2525 gen_op_calc_cc(s);
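/* Build (cc << 4) | program_mask and insert it into bits 31-24 of R1;
   cc is at most 3, so the two top bits of that byte remain zero. */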
2526 t1 = tcg_temp_new_i64();
2527 tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2528 t2 = tcg_temp_new_i64();
2529 tcg_gen_extu_i32_i64(t2, cc_op);
2530 tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2531 tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2532 tcg_temp_free_i64(t1);
2533 tcg_temp_free_i64(t2);
2534 return DISAS_NEXT;
2535 }
2536
2537 #ifndef CONFIG_USER_ONLY
2538 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2539 {
2540 TCGv_i32 m4;
2541
2542 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2543 m4 = tcg_const_i32(get_field(s->fields, m4));
2544 } else {
2545 m4 = tcg_const_i32(0);
2546 }
2547 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2548 tcg_temp_free_i32(m4);
2549 return DISAS_NEXT;
2550 }
2551
2552 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2553 {
2554 TCGv_i32 m4;
2555
2556 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2557 m4 = tcg_const_i32(get_field(s->fields, m4));
2558 } else {
2559 m4 = tcg_const_i32(0);
2560 }
2561 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2562 tcg_temp_free_i32(m4);
2563 return DISAS_NEXT;
2564 }
2565
2566 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2567 {
2568 gen_helper_iske(o->out, cpu_env, o->in2);
2569 return DISAS_NEXT;
2570 }
2571 #endif
2572
2573 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2574 {
2575 int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2576 int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2577 int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2578 TCGv_i32 t_r1, t_r2, t_r3, type;
2579
2580 switch (s->insn->data) {
2581 case S390_FEAT_TYPE_KMCTR:
2582 if (r3 & 1 || !r3) {
2583 gen_program_exception(s, PGM_SPECIFICATION);
2584 return DISAS_NORETURN;
2585 }
2586 /* FALL THROUGH */
2587 case S390_FEAT_TYPE_PPNO:
2588 case S390_FEAT_TYPE_KMF:
2589 case S390_FEAT_TYPE_KMC:
2590 case S390_FEAT_TYPE_KMO:
2591 case S390_FEAT_TYPE_KM:
2592 if (r1 & 1 || !r1) {
2593 gen_program_exception(s, PGM_SPECIFICATION);
2594 return DISAS_NORETURN;
2595 }
2596 /* FALL THROUGH */
2597 case S390_FEAT_TYPE_KMAC:
2598 case S390_FEAT_TYPE_KIMD:
2599 case S390_FEAT_TYPE_KLMD:
2600 if (r2 & 1 || !r2) {
2601 gen_program_exception(s, PGM_SPECIFICATION);
2602 return DISAS_NORETURN;
2603 }
2604 /* FALL THROUGH */
2605 case S390_FEAT_TYPE_PCKMO:
2606 case S390_FEAT_TYPE_PCC:
2607 break;
2608 default:
2609 g_assert_not_reached();
2610 }
2611
2612 t_r1 = tcg_const_i32(r1);
2613 t_r2 = tcg_const_i32(r2);
2614 t_r3 = tcg_const_i32(r3);
2615 type = tcg_const_i32(s->insn->data);
2616 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2617 set_cc_static(s);
2618 tcg_temp_free_i32(t_r1);
2619 tcg_temp_free_i32(t_r2);
2620 tcg_temp_free_i32(t_r3);
2621 tcg_temp_free_i32(type);
2622 return DISAS_NEXT;
2623 }
2624
2625 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2626 {
2627 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2628 set_cc_static(s);
2629 return DISAS_NEXT;
2630 }
2631
2632 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2633 {
2634 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2635 set_cc_static(s);
2636 return DISAS_NEXT;
2637 }
2638
2639 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2640 {
2641 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2642 set_cc_static(s);
2643 return DISAS_NEXT;
2644 }
2645
2646 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2647 {
2648 /* The real output is indeed the original value in memory. */
2650 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2651 s->insn->data | MO_ALIGN);
2652 /* However, we need to recompute the addition for setting CC. */
2653 tcg_gen_add_i64(o->out, o->in1, o->in2);
2654 return DISAS_NEXT;
2655 }
2656
2657 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2658 {
2659 /* The real output is indeed the original value in memory. */
2661 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2662 s->insn->data | MO_ALIGN);
2663 /* However, we need to recompute the operation for setting CC. */
2664 tcg_gen_and_i64(o->out, o->in1, o->in2);
2665 return DISAS_NEXT;
2666 }
2667
2668 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2669 {
2670 /* The real output is indeed the original value in memory. */
2672 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2673 s->insn->data | MO_ALIGN);
2674 /* However, we need to recompute the operation for setting CC. */
2675 tcg_gen_or_i64(o->out, o->in1, o->in2);
2676 return DISAS_NEXT;
2677 }
2678
2679 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2680 {
2681 /* The real output is indeed the original value in memory. */
2683 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2684 s->insn->data | MO_ALIGN);
2685 /* However, we need to recompute the operation for setting CC. */
2686 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2687 return DISAS_NEXT;
2688 }
2689
2690 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2691 {
2692 gen_helper_ldeb(o->out, cpu_env, o->in2);
2693 return DISAS_NEXT;
2694 }
2695
2696 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2697 {
2698 gen_helper_ledb(o->out, cpu_env, o->in2);
2699 return DISAS_NEXT;
2700 }
2701
2702 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2703 {
2704 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2705 return DISAS_NEXT;
2706 }
2707
2708 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2709 {
2710 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2711 return DISAS_NEXT;
2712 }
2713
2714 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2715 {
2716 gen_helper_lxdb(o->out, cpu_env, o->in2);
2717 return_low128(o->out2);
2718 return DISAS_NEXT;
2719 }
2720
2721 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2722 {
2723 gen_helper_lxeb(o->out, cpu_env, o->in2);
2724 return_low128(o->out2);
2725 return DISAS_NEXT;
2726 }
2727
2728 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2729 {
2730 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2731 return DISAS_NEXT;
2732 }
2733
2734 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2735 {
2736 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2737 return DISAS_NEXT;
2738 }
2739
2740 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2741 {
2742 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2743 return DISAS_NEXT;
2744 }
2745
2746 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2747 {
2748 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2749 return DISAS_NEXT;
2750 }
2751
2752 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2753 {
2754 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2755 return DISAS_NEXT;
2756 }
2757
2758 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2759 {
2760 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2761 return DISAS_NEXT;
2762 }
2763
2764 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2765 {
2766 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2767 return DISAS_NEXT;
2768 }
2769
2770 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2771 {
2772 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2773 return DISAS_NEXT;
2774 }
2775
2776 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2777 {
2778 TCGLabel *lab = gen_new_label();
2779 store_reg32_i64(get_field(s->fields, r1), o->in2);
2780 /* The value is stored even in case of trap. */
2781 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2782 gen_trap(s);
2783 gen_set_label(lab);
2784 return DISAS_NEXT;
2785 }
2786
2787 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2788 {
2789 TCGLabel *lab = gen_new_label();
2790 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2791 /* The value is stored even in case of trap. */
2792 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2793 gen_trap(s);
2794 gen_set_label(lab);
2795 return DISAS_NEXT;
2796 }
2797
2798 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2799 {
2800 TCGLabel *lab = gen_new_label();
2801 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2802 /* The value is stored even in case of trap. */
2803 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2804 gen_trap(s);
2805 gen_set_label(lab);
2806 return DISAS_NEXT;
2807 }
2808
2809 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2810 {
2811 TCGLabel *lab = gen_new_label();
2812 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2813 /* The value is stored even in case of trap. */
2814 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2815 gen_trap(s);
2816 gen_set_label(lab);
2817 return DISAS_NEXT;
2818 }
2819
2820 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2821 {
2822 TCGLabel *lab = gen_new_label();
2823 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2824 /* The value is stored even in case of trap. */
2825 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2826 gen_trap(s);
2827 gen_set_label(lab);
2828 return DISAS_NEXT;
2829 }
2830
2831 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2832 {
2833 DisasCompare c;
2834
2835 disas_jcc(s, &c, get_field(s->fields, m3));
2836
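/* out = condition ? in2 : in1, where in1 holds the previous value of
   R1 so that the not-taken case simply writes it back. */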
2837 if (c.is_64) {
2838 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2839 o->in2, o->in1);
2840 free_compare(&c);
2841 } else {
2842 TCGv_i32 t32 = tcg_temp_new_i32();
2843 TCGv_i64 t, z;
2844
2845 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2846 free_compare(&c);
2847
2848 t = tcg_temp_new_i64();
2849 tcg_gen_extu_i32_i64(t, t32);
2850 tcg_temp_free_i32(t32);
2851
2852 z = tcg_const_i64(0);
2853 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2854 tcg_temp_free_i64(t);
2855 tcg_temp_free_i64(z);
2856 }
2857
2858 return DISAS_NEXT;
2859 }
2860
2861 #ifndef CONFIG_USER_ONLY
2862 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2863 {
2864 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2865 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2866 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2867 tcg_temp_free_i32(r1);
2868 tcg_temp_free_i32(r3);
2869 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2870 return DISAS_PC_STALE_NOCHAIN;
2871 }
2872
2873 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2874 {
2875 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2876 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2877 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2878 tcg_temp_free_i32(r1);
2879 tcg_temp_free_i32(r3);
2880 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2881 return DISAS_PC_STALE_NOCHAIN;
2882 }
2883
2884 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2885 {
2886 gen_helper_lra(o->out, cpu_env, o->in2);
2887 set_cc_static(s);
2888 return DISAS_NEXT;
2889 }
2890
2891 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2892 {
2893 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2894 return DISAS_NEXT;
2895 }
2896
2897 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2898 {
2899 TCGv_i64 t1, t2;
2900
2901 per_breaking_event(s);
2902
2903 t1 = tcg_temp_new_i64();
2904 t2 = tcg_temp_new_i64();
2905 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2906 MO_TEUL | MO_ALIGN_8);
2907 tcg_gen_addi_i64(o->in2, o->in2, 4);
2908 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2909 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2910 tcg_gen_shli_i64(t1, t1, 32);
2911 gen_helper_load_psw(cpu_env, t1, t2);
2912 tcg_temp_free_i64(t1);
2913 tcg_temp_free_i64(t2);
2914 return DISAS_NORETURN;
2915 }
2916
2917 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2918 {
2919 TCGv_i64 t1, t2;
2920
2921 per_breaking_event(s);
2922
2923 t1 = tcg_temp_new_i64();
2924 t2 = tcg_temp_new_i64();
2925 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2926 MO_TEQ | MO_ALIGN_8);
2927 tcg_gen_addi_i64(o->in2, o->in2, 8);
2928 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2929 gen_helper_load_psw(cpu_env, t1, t2);
2930 tcg_temp_free_i64(t1);
2931 tcg_temp_free_i64(t2);
2932 return DISAS_NORETURN;
2933 }
2934 #endif
2935
2936 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2937 {
2938 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2939 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2940 gen_helper_lam(cpu_env, r1, o->in2, r3);
2941 tcg_temp_free_i32(r1);
2942 tcg_temp_free_i32(r3);
2943 return DISAS_NEXT;
2944 }
2945
2946 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
2947 {
2948 int r1 = get_field(s->fields, r1);
2949 int r3 = get_field(s->fields, r3);
2950 TCGv_i64 t1, t2;
2951
2952 /* Only one register to read. */
2953 t1 = tcg_temp_new_i64();
2954 if (unlikely(r1 == r3)) {
2955 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2956 store_reg32_i64(r1, t1);
2957 tcg_temp_free(t1);
2958 return DISAS_NEXT;
2959 }
2960
2961 /* First load the values of the first and last registers to trigger
2962 possible page faults. */
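/* E.g. lm %r14,%r2,d(b) wraps around and loads r14, r15, r0, r1, r2;
   (r3 - r1) & 15 = 4 then addresses the last register's word. */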
2963 t2 = tcg_temp_new_i64();
2964 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2965 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2966 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2967 store_reg32_i64(r1, t1);
2968 store_reg32_i64(r3, t2);
2969
2970 /* Only two registers to read. */
2971 if (((r1 + 1) & 15) == r3) {
2972 tcg_temp_free(t2);
2973 tcg_temp_free(t1);
2974 return DISAS_NEXT;
2975 }
2976
2977 /* Then load the remaining registers; a page fault can no longer occur. */
2978 r3 = (r3 - 1) & 15;
2979 tcg_gen_movi_i64(t2, 4);
2980 while (r1 != r3) {
2981 r1 = (r1 + 1) & 15;
2982 tcg_gen_add_i64(o->in2, o->in2, t2);
2983 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2984 store_reg32_i64(r1, t1);
2985 }
2986 tcg_temp_free(t2);
2987 tcg_temp_free(t1);
2988
2989 return DISAS_NEXT;
2990 }
2991
2992 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
2993 {
2994 int r1 = get_field(s->fields, r1);
2995 int r3 = get_field(s->fields, r3);
2996 TCGv_i64 t1, t2;
2997
2998 /* Only one register to read. */
2999 t1 = tcg_temp_new_i64();
3000 if (unlikely(r1 == r3)) {
3001 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3002 store_reg32h_i64(r1, t1);
3003 tcg_temp_free(t1);
3004 return DISAS_NEXT;
3005 }
3006
3007 /* First load the values of the first and last registers to trigger
3008 possible page faults. */
3009 t2 = tcg_temp_new_i64();
3010 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3011 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3012 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3013 store_reg32h_i64(r1, t1);
3014 store_reg32h_i64(r3, t2);
3015
3016 /* Only two registers to read. */
3017 if (((r1 + 1) & 15) == r3) {
3018 tcg_temp_free(t2);
3019 tcg_temp_free(t1);
3020 return DISAS_NEXT;
3021 }
3022
3023 /* Then load the remaining registers; a page fault can no longer occur. */
3024 r3 = (r3 - 1) & 15;
3025 tcg_gen_movi_i64(t2, 4);
3026 while (r1 != r3) {
3027 r1 = (r1 + 1) & 15;
3028 tcg_gen_add_i64(o->in2, o->in2, t2);
3029 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3030 store_reg32h_i64(r1, t1);
3031 }
3032 tcg_temp_free(t2);
3033 tcg_temp_free(t1);
3034
3035 return DISAS_NEXT;
3036 }
3037
3038 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3039 {
3040 int r1 = get_field(s->fields, r1);
3041 int r3 = get_field(s->fields, r3);
3042 TCGv_i64 t1, t2;
3043
3044 /* Only one register to read. */
3045 if (unlikely(r1 == r3)) {
3046 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3047 return DISAS_NEXT;
3048 }
3049
3050 /* First load the values of the first and last registers to trigger
3051 possible page faults. */
3052 t1 = tcg_temp_new_i64();
3053 t2 = tcg_temp_new_i64();
3054 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3055 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3056 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3057 tcg_gen_mov_i64(regs[r1], t1);
3058 tcg_temp_free(t2);
3059
3060 /* Only two registers to read. */
3061 if (((r1 + 1) & 15) == r3) {
3062 tcg_temp_free(t1);
3063 return DISAS_NEXT;
3064 }
3065
3066 /* Then load the remaining registers; a page fault can no longer occur. */
3067 r3 = (r3 - 1) & 15;
3068 tcg_gen_movi_i64(t1, 8);
3069 while (r1 != r3) {
3070 r1 = (r1 + 1) & 15;
3071 tcg_gen_add_i64(o->in2, o->in2, t1);
3072 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3073 }
3074 tcg_temp_free(t1);
3075
3076 return DISAS_NEXT;
3077 }
3078
3079 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3080 {
3081 TCGv_i64 a1, a2;
3082 TCGMemOp mop = s->insn->data;
3083
3084 /* In a parallel context, stop the world and single step. */
3085 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3086 update_psw_addr(s);
3087 update_cc_op(s);
3088 gen_exception(EXCP_ATOMIC);
3089 return DISAS_NORETURN;
3090 }
3091
3092 /* In a serial context, perform the two loads ... */
3093 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
3094 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3095 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3096 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3097 tcg_temp_free_i64(a1);
3098 tcg_temp_free_i64(a2);
3099
3100 /* ... and indicate that we performed them while interlocked. */
3101 gen_op_movi_cc(s, 0);
3102 return DISAS_NEXT;
3103 }
3104
3105 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3106 {
3107 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3108 gen_helper_lpq(o->out, cpu_env, o->in2);
3109 } else if (HAVE_ATOMIC128) {
3110 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3111 } else {
3112 gen_helper_exit_atomic(cpu_env);
3113 return DISAS_NORETURN;
3114 }
3115 return_low128(o->out2);
3116 return DISAS_NEXT;
3117 }
3118
3119 #ifndef CONFIG_USER_ONLY
3120 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3121 {
3122 gen_helper_lura(o->out, cpu_env, o->in2);
3123 return DISAS_NEXT;
3124 }
3125
3126 static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
3127 {
3128 gen_helper_lurag(o->out, cpu_env, o->in2);
3129 return DISAS_NEXT;
3130 }
3131 #endif
3132
3133 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3134 {
3135 tcg_gen_andi_i64(o->out, o->in2, -256);
3136 return DISAS_NEXT;
3137 }
3138
3139 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3140 {
3141 o->out = o->in2;
3142 o->g_out = o->g_in2;
3143 o->in2 = NULL;
3144 o->g_in2 = false;
3145 return DISAS_NEXT;
3146 }
3147
3148 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3149 {
3150 int b2 = get_field(s->fields, b2);
3151 TCGv ar1 = tcg_temp_new_i64();
3152
3153 o->out = o->in2;
3154 o->g_out = o->g_in2;
3155 o->in2 = NULL;
3156 o->g_in2 = false;
3157
3158 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3159 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3160 tcg_gen_movi_i64(ar1, 0);
3161 break;
3162 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3163 tcg_gen_movi_i64(ar1, 1);
3164 break;
3165 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3166 if (b2) {
3167 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3168 } else {
3169 tcg_gen_movi_i64(ar1, 0);
3170 }
3171 break;
3172 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3173 tcg_gen_movi_i64(ar1, 2);
3174 break;
3175 }
3176
3177 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3178 tcg_temp_free_i64(ar1);
3179
3180 return DISAS_NEXT;
3181 }
3182
3183 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3184 {
3185 o->out = o->in1;
3186 o->out2 = o->in2;
3187 o->g_out = o->g_in1;
3188 o->g_out2 = o->g_in2;
3189 o->in1 = NULL;
3190 o->in2 = NULL;
3191 o->g_in1 = o->g_in2 = false;
3192 return DISAS_NEXT;
3193 }
3194
3195 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3196 {
3197 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3198 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3199 tcg_temp_free_i32(l);
3200 return DISAS_NEXT;
3201 }
3202
3203 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3204 {
3205 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3206 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3207 tcg_temp_free_i32(l);
3208 return DISAS_NEXT;
3209 }
3210
3211 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3212 {
3213 int r1 = get_field(s->fields, r1);
3214 int r2 = get_field(s->fields, r2);
3215 TCGv_i32 t1, t2;
3216
3217 /* r1 and r2 must be even. */
3218 if (r1 & 1 || r2 & 1) {
3219 gen_program_exception(s, PGM_SPECIFICATION);
3220 return DISAS_NORETURN;
3221 }
3222
3223 t1 = tcg_const_i32(r1);
3224 t2 = tcg_const_i32(r2);
3225 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3226 tcg_temp_free_i32(t1);
3227 tcg_temp_free_i32(t2);
3228 set_cc_static(s);
3229 return DISAS_NEXT;
3230 }
3231
3232 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3233 {
3234 int r1 = get_field(s->fields, r1);
3235 int r3 = get_field(s->fields, r3);
3236 TCGv_i32 t1, t3;
3237
3238 /* r1 and r3 must be even. */
3239 if (r1 & 1 || r3 & 1) {
3240 gen_program_exception(s, PGM_SPECIFICATION);
3241 return DISAS_NORETURN;
3242 }
3243
3244 t1 = tcg_const_i32(r1);
3245 t3 = tcg_const_i32(r3);
3246 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3247 tcg_temp_free_i32(t1);
3248 tcg_temp_free_i32(t3);
3249 set_cc_static(s);
3250 return DISAS_NEXT;
3251 }
3252
3253 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3254 {
3255 int r1 = get_field(s->fields, r1);
3256 int r3 = get_field(s->fields, r3);
3257 TCGv_i32 t1, t3;
3258
3259 /* r1 and r3 must be even. */
3260 if (r1 & 1 || r3 & 1) {
3261 gen_program_exception(s, PGM_SPECIFICATION);
3262 return DISAS_NORETURN;
3263 }
3264
3265 t1 = tcg_const_i32(r1);
3266 t3 = tcg_const_i32(r3);
3267 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3268 tcg_temp_free_i32(t1);
3269 tcg_temp_free_i32(t3);
3270 set_cc_static(s);
3271 return DISAS_NEXT;
3272 }
3273
3274 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3275 {
3276 int r3 = get_field(s->fields, r3);
3277 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3278 set_cc_static(s);
3279 return DISAS_NEXT;
3280 }
3281
3282 #ifndef CONFIG_USER_ONLY
3283 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3284 {
3285 int r1 = get_field(s->fields, l1);
3286 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3287 set_cc_static(s);
3288 return DISAS_NEXT;
3289 }
3290
3291 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3292 {
3293 int r1 = get_field(s->fields, l1);
3294 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3295 set_cc_static(s);
3296 return DISAS_NEXT;
3297 }
3298 #endif
3299
3300 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3301 {
3302 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3303 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3304 tcg_temp_free_i32(l);
3305 return DISAS_NEXT;
3306 }
3307
3308 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3309 {
3310 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3311 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3312 tcg_temp_free_i32(l);
3313 return DISAS_NEXT;
3314 }
3315
3316 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3317 {
3318 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3319 set_cc_static(s);
3320 return DISAS_NEXT;
3321 }
3322
3323 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3324 {
3325 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3326 set_cc_static(s);
3327 return_low128(o->in2);
3328 return DISAS_NEXT;
3329 }
3330
3331 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3332 {
3333 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3334 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3335 tcg_temp_free_i32(l);
3336 return DISAS_NEXT;
3337 }
3338
3339 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3340 {
3341 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3342 return DISAS_NEXT;
3343 }
3344
3345 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3346 {
3347 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3348 return DISAS_NEXT;
3349 }
3350
3351 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3352 {
3353 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3354 return DISAS_NEXT;
3355 }
3356
3357 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3358 {
3359 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3360 return DISAS_NEXT;
3361 }
3362
3363 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3364 {
3365 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3366 return DISAS_NEXT;
3367 }
3368
3369 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3370 {
3371 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3372 return_low128(o->out2);
3373 return DISAS_NEXT;
3374 }
3375
3376 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3377 {
3378 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3379 return_low128(o->out2);
3380 return DISAS_NEXT;
3381 }
3382
3383 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3384 {
3385 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3386 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3387 tcg_temp_free_i64(r3);
3388 return DISAS_NEXT;
3389 }
3390
3391 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3392 {
3393 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3394 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3395 tcg_temp_free_i64(r3);
3396 return DISAS_NEXT;
3397 }
3398
3399 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3400 {
3401 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3402 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3403 tcg_temp_free_i64(r3);
3404 return DISAS_NEXT;
3405 }
3406
3407 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3408 {
3409 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3410 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3411 tcg_temp_free_i64(r3);
3412 return DISAS_NEXT;
3413 }
3414
3415 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3416 {
3417 TCGv_i64 z, n;
3418 z = tcg_const_i64(0);
3419 n = tcg_temp_new_i64();
3420 tcg_gen_neg_i64(n, o->in2);
3421 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3422 tcg_temp_free_i64(n);
3423 tcg_temp_free_i64(z);
3424 return DISAS_NEXT;
3425 }
3426
3427 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3428 {
3429 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3430 return DISAS_NEXT;
3431 }
3432
3433 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3434 {
3435 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3436 return DISAS_NEXT;
3437 }
3438
3439 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3440 {
3441 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3442 tcg_gen_mov_i64(o->out2, o->in2);
3443 return DISAS_NEXT;
3444 }
3445
3446 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3447 {
3448 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3449 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3450 tcg_temp_free_i32(l);
3451 set_cc_static(s);
3452 return DISAS_NEXT;
3453 }
3454
3455 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3456 {
3457 tcg_gen_neg_i64(o->out, o->in2);
3458 return DISAS_NEXT;
3459 }
3460
3461 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3462 {
3463 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3464 return DISAS_NEXT;
3465 }
3466
3467 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3468 {
3469 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3470 return DISAS_NEXT;
3471 }
3472
3473 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3474 {
3475 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3476 tcg_gen_mov_i64(o->out2, o->in2);
3477 return DISAS_NEXT;
3478 }
3479
3480 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3481 {
3482 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3483 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3484 tcg_temp_free_i32(l);
3485 set_cc_static(s);
3486 return DISAS_NEXT;
3487 }
3488
3489 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3490 {
3491 tcg_gen_or_i64(o->out, o->in1, o->in2);
3492 return DISAS_NEXT;
3493 }
3494
3495 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3496 {
3497 int shift = s->insn->data & 0xff;
3498 int size = s->insn->data >> 8;
3499 uint64_t mask = ((1ull << size) - 1) << shift;
3500
3501 assert(!o->g_in2);
3502 tcg_gen_shli_i64(o->in2, o->in2, shift);
3503 tcg_gen_or_i64(o->out, o->in1, o->in2);
3504
3505 /* Produce the CC from only the bits manipulated. */
3506 tcg_gen_andi_i64(cc_dst, o->out, mask);
3507 set_cc_nz_u64(s, cc_dst);
3508 return DISAS_NEXT;
3509 }
3510
3511 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3512 {
3513 o->in1 = tcg_temp_new_i64();
3514
3515 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3516 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3517 } else {
3518 /* Perform the atomic operation in memory. */
3519 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3520 s->insn->data);
3521 }
3522
3523 /* Recompute also for atomic case: needed for setting CC. */
3524 tcg_gen_or_i64(o->out, o->in1, o->in2);
3525
3526 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3527 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3528 }
3529 return DISAS_NEXT;
3530 }
3531
3532 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3533 {
3534 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3535 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3536 tcg_temp_free_i32(l);
3537 return DISAS_NEXT;
3538 }
3539
3540 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3541 {
3542 int l2 = get_field(s->fields, l2) + 1;
3543 TCGv_i32 l;
3544
3545 /* The length must not exceed 32 bytes. */
3546 if (l2 > 32) {
3547 gen_program_exception(s, PGM_SPECIFICATION);
3548 return DISAS_NORETURN;
3549 }
3550 l = tcg_const_i32(l2);
3551 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3552 tcg_temp_free_i32(l);
3553 return DISAS_NEXT;
3554 }
3555
3556 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3557 {
3558 int l2 = get_field(s->fields, l2) + 1;
3559 TCGv_i32 l;
3560
3561 /* The length must be even and must not exceed 64 bytes. */
3562 if ((l2 & 1) || (l2 > 64)) {
3563 gen_program_exception(s, PGM_SPECIFICATION);
3564 return DISAS_NORETURN;
3565 }
3566 l = tcg_const_i32(l2);
3567 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3568 tcg_temp_free_i32(l);
3569 return DISAS_NEXT;
3570 }
3571
3572 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3573 {
3574 gen_helper_popcnt(o->out, o->in2);
3575 return DISAS_NEXT;
3576 }
3577
3578 #ifndef CONFIG_USER_ONLY
3579 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3580 {
3581 gen_helper_ptlb(cpu_env);
3582 return DISAS_NEXT;
3583 }
3584 #endif
3585
3586 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3587 {
3588 int i3 = get_field(s->fields, i3);
3589 int i4 = get_field(s->fields, i4);
3590 int i5 = get_field(s->fields, i5);
3591 int do_zero = i4 & 0x80;
3592 uint64_t mask, imask, pmask;
3593 int pos, len, rot;
3594
3595 /* Adjust the arguments for the specific insn. */
3596 switch (s->fields->op2) {
3597 case 0x55: /* risbg */
3598 case 0x59: /* risbgn */
3599 i3 &= 63;
3600 i4 &= 63;
3601 pmask = ~0;
3602 break;
3603 case 0x5d: /* risbhg */
3604 i3 &= 31;
3605 i4 &= 31;
3606 pmask = 0xffffffff00000000ull;
3607 break;
3608 case 0x51: /* risblg */
3609 i3 &= 31;
3610 i4 &= 31;
3611 pmask = 0x00000000ffffffffull;
3612 break;
3613 default:
3614 g_assert_not_reached();
3615 }
3616
3617 /* MASK is the set of bits to be inserted from R2.
3618 Take care with I3/I4 wraparound. */
3619 mask = pmask >> i3;
3620 if (i3 <= i4) {
3621 mask ^= pmask >> i4 >> 1;
3622 } else {
3623 mask |= ~(pmask >> i4 >> 1);
3624 }
3625 mask &= pmask;
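/* E.g. for risbg with i3 = 60 and i4 = 3 the range wraps: the four
   most and four least significant bits are selected, and mask is
   0xf00000000000000f. */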
3626
3627 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3628 insns, we need to keep the other half of the register. */
3629 imask = ~mask | ~pmask;
3630 if (do_zero) {
3631 imask = ~pmask;
3632 }
3633
3634 len = i4 - i3 + 1;
3635 pos = 63 - i4;
3636 rot = i5 & 63;
3637 if (s->fields->op2 == 0x5d) {
3638 pos += 32;
3639 }
3640
3641 /* In some cases we can implement this with extract. */
3642 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3643 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3644 return DISAS_NEXT;
3645 }
3646
3647 /* In some cases we can implement this with deposit. */
3648 if (len > 0 && (imask == 0 || ~mask == imask)) {
3649 /* Note that we rotate the bits to be inserted to the lsb, not to
3650 the position as described in the PoO. */
3651 rot = (rot - pos) & 63;
3652 } else {
3653 pos = -1;
3654 }
3655
3656 /* Rotate the input as necessary. */
3657 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3658
3659 /* Insert the selected bits into the output. */
3660 if (pos >= 0) {
3661 if (imask == 0) {
3662 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3663 } else {
3664 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3665 }
3666 } else if (imask == 0) {
3667 tcg_gen_andi_i64(o->out, o->in2, mask);
3668 } else {
3669 tcg_gen_andi_i64(o->in2, o->in2, mask);
3670 tcg_gen_andi_i64(o->out, o->out, imask);
3671 tcg_gen_or_i64(o->out, o->out, o->in2);
3672 }
3673 return DISAS_NEXT;
3674 }
3675
3676 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3677 {
3678 int i3 = get_field(s->fields, i3);
3679 int i4 = get_field(s->fields, i4);
3680 int i5 = get_field(s->fields, i5);
3681 uint64_t mask;
3682
3683 /* If this is a test-only form, arrange to discard the result. */
3684 if (i3 & 0x80) {
3685 o->out = tcg_temp_new_i64();
3686 o->g_out = false;
3687 }
3688
3689 i3 &= 63;
3690 i4 &= 63;
3691 i5 &= 63;
3692
3693 /* MASK is the set of bits to be operated on from R2.
3694 Take care with I3/I4 wraparound. */
3695 mask = ~0ull >> i3;
3696 if (i3 <= i4) {
3697 mask ^= ~0ull >> i4 >> 1;
3698 } else {
3699 mask |= ~(~0ull >> i4 >> 1);
3700 }
3701
3702 /* Rotate the input as necessary. */
3703 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3704
3705 /* Operate. */
3706 switch (s->fields->op2) {
3707 case 0x55: /* AND */
3708 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3709 tcg_gen_and_i64(o->out, o->out, o->in2);
3710 break;
3711 case 0x56: /* OR */
3712 tcg_gen_andi_i64(o->in2, o->in2, mask);
3713 tcg_gen_or_i64(o->out, o->out, o->in2);
3714 break;
3715 case 0x57: /* XOR */
3716 tcg_gen_andi_i64(o->in2, o->in2, mask);
3717 tcg_gen_xor_i64(o->out, o->out, o->in2);
3718 break;
3719 default:
3720 abort();
3721 }
3722
3723 /* Set the CC. */
3724 tcg_gen_andi_i64(cc_dst, o->out, mask);
3725 set_cc_nz_u64(s, cc_dst);
3726 return DISAS_NEXT;
3727 }
3728
3729 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3730 {
3731 tcg_gen_bswap16_i64(o->out, o->in2);
3732 return DISAS_NEXT;
3733 }
3734
3735 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3736 {
3737 tcg_gen_bswap32_i64(o->out, o->in2);
3738 return DISAS_NEXT;
3739 }
3740
3741 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3742 {
3743 tcg_gen_bswap64_i64(o->out, o->in2);
3744 return DISAS_NEXT;
3745 }
3746
3747 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3748 {
3749 TCGv_i32 t1 = tcg_temp_new_i32();
3750 TCGv_i32 t2 = tcg_temp_new_i32();
3751 TCGv_i32 to = tcg_temp_new_i32();
3752 tcg_gen_extrl_i64_i32(t1, o->in1);
3753 tcg_gen_extrl_i64_i32(t2, o->in2);
3754 tcg_gen_rotl_i32(to, t1, t2);
3755 tcg_gen_extu_i32_i64(o->out, to);
3756 tcg_temp_free_i32(t1);
3757 tcg_temp_free_i32(t2);
3758 tcg_temp_free_i32(to);
3759 return DISAS_NEXT;
3760 }
3761
3762 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3763 {
3764 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3765 return DISAS_NEXT;
3766 }
3767
3768 #ifndef CONFIG_USER_ONLY
3769 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3770 {
3771 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3772 set_cc_static(s);
3773 return DISAS_NEXT;
3774 }
3775
3776 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3777 {
3778 gen_helper_sacf(cpu_env, o->in2);
3779 /* Addressing mode has changed, so end the block. */
3780 return DISAS_PC_STALE;
3781 }
3782 #endif
3783
3784 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3785 {
3786 int sam = s->insn->data;
3787 TCGv_i64 tsam;
3788 uint64_t mask;
3789
3790 switch (sam) {
3791 case 0:
3792 mask = 0xffffff;    /* 24-bit addressing mode */
3793 break;
3794 case 1:
3795 mask = 0x7fffffff;  /* 31-bit addressing mode */
3796 break;
3797 default:
3798 mask = -1;          /* 64-bit addressing mode */
3799 break;
3800 }
3801
3802 /* Bizarre but true, we check the address of the current insn for the
3803 specification exception, not the next to be executed. Thus the PoO
3804 documents that Bad Things Happen two bytes before the end. */
3805 if (s->base.pc_next & ~mask) {
3806 gen_program_exception(s, PGM_SPECIFICATION);
3807 return DISAS_NORETURN;
3808 }
3809 s->pc_tmp &= mask;
3810
3811 tsam = tcg_const_i64(sam);
3812 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3813 tcg_temp_free_i64(tsam);
3814
3815 /* Always exit the TB, since we (may have) changed execution mode. */
3816 return DISAS_PC_STALE;
3817 }
3818
3819 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3820 {
3821 int r1 = get_field(s->fields, r1);
3822 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3823 return DISAS_NEXT;
3824 }
3825
3826 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3827 {
3828 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3829 return DISAS_NEXT;
3830 }
3831
3832 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3833 {
3834 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3835 return DISAS_NEXT;
3836 }
3837
3838 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3839 {
3840 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3841 return_low128(o->out2);
3842 return DISAS_NEXT;
3843 }
3844
3845 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3846 {
3847 gen_helper_sqeb(o->out, cpu_env, o->in2);
3848 return DISAS_NEXT;
3849 }
3850
3851 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3852 {
3853 gen_helper_sqdb(o->out, cpu_env, o->in2);
3854 return DISAS_NEXT;
3855 }
3856
3857 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3858 {
3859 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3860 return_low128(o->out2);
3861 return DISAS_NEXT;
3862 }
3863
3864 #ifndef CONFIG_USER_ONLY
3865 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3866 {
3867 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3868 set_cc_static(s);
3869 return DISAS_NEXT;
3870 }
3871
3872 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3873 {
3874 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3875 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3876 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3877 set_cc_static(s);
3878 tcg_temp_free_i32(r1);
3879 tcg_temp_free_i32(r3);
3880 return DISAS_NEXT;
3881 }
3882 #endif
3883
3884 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3885 {
3886 DisasCompare c;
3887 TCGv_i64 a, h;
3888 TCGLabel *lab;
3889 int r1;
3890
3891 disas_jcc(s, &c, get_field(s->fields, m3));
3892
3893 /* We want to store when the condition is fulfilled, so branch
3894 out when it is not. */
3895 c.cond = tcg_invert_cond(c.cond);
3896
3897 lab = gen_new_label();
3898 if (c.is_64) {
3899 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3900 } else {
3901 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3902 }
3903 free_compare(&c);
3904
3905 r1 = get_field(s->fields, r1);
3906 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3907 switch (s->insn->data) {
3908 case 1: /* STOCG */
3909 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3910 break;
3911 case 0: /* STOC */
3912 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3913 break;
3914 case 2: /* STOCFH */
3915 h = tcg_temp_new_i64();
3916 tcg_gen_shri_i64(h, regs[r1], 32);
3917 tcg_gen_qemu_st32(h, a, get_mem_index(s));
3918 tcg_temp_free_i64(h);
3919 break;
3920 default:
3921 g_assert_not_reached();
3922 }
3923 tcg_temp_free_i64(a);
3924
3925 gen_set_label(lab);
3926 return DISAS_NEXT;
3927 }
3928
3929 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3930 {
3931 uint64_t sign = 1ull << s->insn->data;
3932 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3933 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3934 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3935 /* The arithmetic left shift is curious in that it does not affect
3936 the sign bit. Copy that over from the source unchanged. */
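/* E.g. a 64-bit SLAG of 0x8000000000000001 by 1 yields
   0x8000000000000002: bit 63 is preserved, the rest shifts left. */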
3937 tcg_gen_andi_i64(o->out, o->out, ~sign);
3938 tcg_gen_andi_i64(o->in1, o->in1, sign);
3939 tcg_gen_or_i64(o->out, o->out, o->in1);
3940 return DISAS_NEXT;
3941 }
3942
3943 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
3944 {
3945 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3946 return DISAS_NEXT;
3947 }
3948
3949 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
3950 {
3951 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3952 return DISAS_NEXT;
3953 }
3954
3955 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
3956 {
3957 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3958 return DISAS_NEXT;
3959 }
3960
3961 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
3962 {
3963 gen_helper_sfpc(cpu_env, o->in2);
3964 return DISAS_NEXT;
3965 }
3966
3967 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
3968 {
3969 gen_helper_sfas(cpu_env, o->in2);
3970 return DISAS_NEXT;
3971 }
3972
3973 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
3974 {
3975 int b2 = get_field(s->fields, b2);
3976 int d2 = get_field(s->fields, d2);
3977 TCGv_i64 t1 = tcg_temp_new_i64();
3978 TCGv_i64 t2 = tcg_temp_new_i64();
3979 int mask, pos, len;
3980
3981 switch (s->fields->op2) {
3982 case 0x99: /* SRNM */
3983 pos = 0, len = 2;
3984 break;
3985 case 0xb8: /* SRNMB */
3986 pos = 0, len = 3;
3987 break;
3988 case 0xb9: /* SRNMT */
3989 pos = 4, len = 3;
3990 break;
3991 default:
3992 tcg_abort();
3993 }
3994 mask = (1 << len) - 1;
3995
3996 /* Insert the value into the appropriate field of the FPC. */
3997 if (b2 == 0) {
3998 tcg_gen_movi_i64(t1, d2 & mask);
3999 } else {
4000 tcg_gen_addi_i64(t1, regs[b2], d2);
4001 tcg_gen_andi_i64(t1, t1, mask);
4002 }
4003 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
4004 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
4005 tcg_temp_free_i64(t1);
4006
4007 /* Then install the new FPC to set the rounding mode in fpu_status. */
4008 gen_helper_sfpc(cpu_env, t2);
4009 tcg_temp_free_i64(t2);
4010 return DISAS_NEXT;
4011 }
4012
4013 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4014 {
4015 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4016 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4017 set_cc_static(s);
4018
4019 tcg_gen_shri_i64(o->in1, o->in1, 24);
4020 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4021 return DISAS_NEXT;
4022 }
4023
4024 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4025 {
4026 int b1 = get_field(s->fields, b1);
4027 int d1 = get_field(s->fields, d1);
4028 int b2 = get_field(s->fields, b2);
4029 int d2 = get_field(s->fields, d2);
4030 int r3 = get_field(s->fields, r3);
4031 TCGv_i64 tmp = tcg_temp_new_i64();
4032
4033 /* fetch all operands first */
4034 o->in1 = tcg_temp_new_i64();
4035 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4036 o->in2 = tcg_temp_new_i64();
4037 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4038 o->addr1 = get_address(s, 0, r3, 0);
4039
4040 /* load the third operand into r3 before modifying anything */
4041 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4042
4043 /* subtract CPU timer from first operand and store in GR0 */
4044 gen_helper_stpt(tmp, cpu_env);
4045 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4046
4047 /* store second operand in GR1 */
4048 tcg_gen_mov_i64(regs[1], o->in2);
4049
4050 tcg_temp_free_i64(tmp);
4051 return DISAS_NEXT;
4052 }
4053
4054 #ifndef CONFIG_USER_ONLY
4055 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4056 {
4057 tcg_gen_shri_i64(o->in2, o->in2, 4);
4058 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4059 return DISAS_NEXT;
4060 }
4061
4062 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4063 {
4064 gen_helper_sske(cpu_env, o->in1, o->in2);
4065 return DISAS_NEXT;
4066 }
4067
4068 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4069 {
4070 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4071 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4072 return DISAS_PC_STALE_NOCHAIN;
4073 }
4074
4075 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4076 {
4077 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4078 return DISAS_NEXT;
4079 }
4080 #endif
4081
4082 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4083 {
4084 gen_helper_stck(o->out, cpu_env);
4085 /* ??? We don't implement clock states. */
4086 gen_op_movi_cc(s, 0);
4087 return DISAS_NEXT;
4088 }
4089
4090 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4091 {
4092 TCGv_i64 c1 = tcg_temp_new_i64();
4093 TCGv_i64 c2 = tcg_temp_new_i64();
4094 TCGv_i64 todpr = tcg_temp_new_i64();
4095 gen_helper_stck(c1, cpu_env);
4096 /* 16-bit value stored in a uint32_t (only valid bits set) */
4097 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4098 /* Shift the 64-bit value into its place as a zero-extended
4099 104-bit value. Note that "bit positions 64-103 are always
4100 non-zero so that they compare differently to STCK"; we set
4101 the least significant bit to 1. */
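/* The 16 bytes stored are thus clock >> 8 in the first doubleword,
followed by (clock << 56) | 0x10000 | todpr in the second. */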
4102 tcg_gen_shli_i64(c2, c1, 56);
4103 tcg_gen_shri_i64(c1, c1, 8);
4104 tcg_gen_ori_i64(c2, c2, 0x10000);
4105 tcg_gen_or_i64(c2, c2, todpr);
4106 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4107 tcg_gen_addi_i64(o->in2, o->in2, 8);
4108 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4109 tcg_temp_free_i64(c1);
4110 tcg_temp_free_i64(c2);
4111 tcg_temp_free_i64(todpr);
4112 /* ??? We don't implement clock states. */
4113 gen_op_movi_cc(s, 0);
4114 return DISAS_NEXT;
4115 }
4116
4117 #ifndef CONFIG_USER_ONLY
4118 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4119 {
4120 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4121 gen_helper_sck(cc_op, cpu_env, o->in1);
4122 set_cc_static(s);
4123 return DISAS_NEXT;
4124 }
4125
4126 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4127 {
4128 gen_helper_sckc(cpu_env, o->in2);
4129 return DISAS_NEXT;
4130 }
4131
4132 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4133 {
4134 gen_helper_sckpf(cpu_env, regs[0]);
4135 return DISAS_NEXT;
4136 }
4137
4138 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4139 {
4140 gen_helper_stckc(o->out, cpu_env);
4141 return DISAS_NEXT;
4142 }
4143
4144 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4145 {
4146 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4147 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4148 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4149 tcg_temp_free_i32(r1);
4150 tcg_temp_free_i32(r3);
4151 return DISAS_NEXT;
4152 }
4153
4154 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4155 {
4156 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4157 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4158 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4159 tcg_temp_free_i32(r1);
4160 tcg_temp_free_i32(r3);
4161 return DISAS_NEXT;
4162 }
4163
4164 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4165 {
4166 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4167 return DISAS_NEXT;
4168 }
4169
4170 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4171 {
4172 gen_helper_spt(cpu_env, o->in2);
4173 return DISAS_NEXT;
4174 }
4175
4176 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4177 {
4178 gen_helper_stfl(cpu_env);
4179 return DISAS_NEXT;
4180 }
4181
4182 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4183 {
4184 gen_helper_stpt(o->out, cpu_env);
4185 return DISAS_NEXT;
4186 }
4187
4188 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4189 {
4190 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4191 set_cc_static(s);
4192 return DISAS_NEXT;
4193 }
4194
4195 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4196 {
4197 gen_helper_spx(cpu_env, o->in2);
4198 return DISAS_NEXT;
4199 }
4200
4201 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4202 {
4203 gen_helper_xsch(cpu_env, regs[1]);
4204 set_cc_static(s);
4205 return DISAS_NEXT;
4206 }
4207
4208 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4209 {
4210 gen_helper_csch(cpu_env, regs[1]);
4211 set_cc_static(s);
4212 return DISAS_NEXT;
4213 }
4214
4215 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4216 {
4217 gen_helper_hsch(cpu_env, regs[1]);
4218 set_cc_static(s);
4219 return DISAS_NEXT;
4220 }
4221
4222 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4223 {
4224 gen_helper_msch(cpu_env, regs[1], o->in2);
4225 set_cc_static(s);
4226 return DISAS_NEXT;
4227 }
4228
4229 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4230 {
4231 gen_helper_rchp(cpu_env, regs[1]);
4232 set_cc_static(s);
4233 return DISAS_NEXT;
4234 }
4235
4236 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4237 {
4238 gen_helper_rsch(cpu_env, regs[1]);
4239 set_cc_static(s);
4240 return DISAS_NEXT;
4241 }
4242
4243 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4244 {
4245 gen_helper_sal(cpu_env, regs[1]);
4246 return DISAS_NEXT;
4247 }
4248
4249 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4250 {
4251 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4252 return DISAS_NEXT;
4253 }
4254
4255 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4256 {
4257 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4258 gen_op_movi_cc(s, 3);
4259 return DISAS_NEXT;
4260 }
4261
4262 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4263 {
4264 /* The instruction is suppressed if not provided. */
4265 return DISAS_NEXT;
4266 }
4267
4268 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4269 {
4270 gen_helper_ssch(cpu_env, regs[1], o->in2);
4271 set_cc_static(s);
4272 return DISAS_NEXT;
4273 }
4274
4275 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4276 {
4277 gen_helper_stsch(cpu_env, regs[1], o->in2);
4278 set_cc_static(s);
4279 return DISAS_NEXT;
4280 }
4281
4282 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4283 {
4284 gen_helper_stcrw(cpu_env, o->in2);
4285 set_cc_static(s);
4286 return DISAS_NEXT;
4287 }
4288
4289 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4290 {
4291 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4292 set_cc_static(s);
4293 return DISAS_NEXT;
4294 }
4295
4296 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4297 {
4298 gen_helper_tsch(cpu_env, regs[1], o->in2);
4299 set_cc_static(s);
4300 return DISAS_NEXT;
4301 }
4302
4303 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4304 {
4305 gen_helper_chsc(cpu_env, o->in2);
4306 set_cc_static(s);
4307 return DISAS_NEXT;
4308 }
4309
4310 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4311 {
4312 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4313 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4314 return DISAS_NEXT;
4315 }
4316
4317 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4318 {
4319 uint64_t i2 = get_field(s->fields, i2);
4320 TCGv_i64 t;
4321
4322 /* It is important to do what the instruction name says: STORE THEN.
4323 If we let the output hook perform the store, then a fault and
4324 restart would leave the wrong SYSTEM MASK in place. */
4325 t = tcg_temp_new_i64();
4326 tcg_gen_shri_i64(t, psw_mask, 56);
4327 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4328 tcg_temp_free_i64(t);
4329
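/* Opcode 0xac is STNSM (AND the immediate into the system mask),
0xad is STOSM (OR it in). */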
4330 if (s->fields->op == 0xac) {
4331 tcg_gen_andi_i64(psw_mask, psw_mask,
4332 (i2 << 56) | 0x00ffffffffffffffull);
4333 } else {
4334 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4335 }
4336
4337 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4338 return DISAS_PC_STALE_NOCHAIN;
4339 }
4340
4341 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4342 {
4343 gen_helper_stura(cpu_env, o->in2, o->in1);
4344 return DISAS_NEXT;
4345 }
4346
4347 static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
4348 {
4349 gen_helper_sturg(cpu_env, o->in2, o->in1);
4350 return DISAS_NEXT;
4351 }
4352 #endif
4353
4354 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4355 {
4356 gen_helper_stfle(cc_op, cpu_env, o->in2);
4357 set_cc_static(s);
4358 return DISAS_NEXT;
4359 }
4360
4361 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4362 {
4363 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4364 return DISAS_NEXT;
4365 }
4366
4367 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4368 {
4369 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4370 return DISAS_NEXT;
4371 }
4372
4373 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4374 {
4375 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4376 return DISAS_NEXT;
4377 }
4378
4379 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4380 {
4381 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4382 return DISAS_NEXT;
4383 }
4384
4385 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4386 {
4387 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4388 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4389 gen_helper_stam(cpu_env, r1, o->in2, r3);
4390 tcg_temp_free_i32(r1);
4391 tcg_temp_free_i32(r3);
4392 return DISAS_NEXT;
4393 }
4394
4395 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4396 {
4397 int m3 = get_field(s->fields, m3);
4398 int pos, base = s->insn->data;
4399 TCGv_i64 tmp = tcg_temp_new_i64();
4400
4401 pos = base + ctz32(m3) * 8;
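/* E.g. for m3 == 0x6, ctz32 == 1, so pos == base + 8 and the two
contiguous selected bytes go out as the single 16-bit store below. */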
4402 switch (m3) {
4403 case 0xf:
4404 /* Effectively a 32-bit store. */
4405 tcg_gen_shri_i64(tmp, o->in1, pos);
4406 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4407 break;
4408
4409 case 0xc:
4410 case 0x6:
4411 case 0x3:
4412 /* Effectively a 16-bit store. */
4413 tcg_gen_shri_i64(tmp, o->in1, pos);
4414 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4415 break;
4416
4417 case 0x8:
4418 case 0x4:
4419 case 0x2:
4420 case 0x1:
4421 /* Effectively an 8-bit store. */
4422 tcg_gen_shri_i64(tmp, o->in1, pos);
4423 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4424 break;
4425
4426 default:
4427 /* This is going to be a sequence of shifts and stores. */
4428 pos = base + 32 - 8;
4429 while (m3) {
4430 if (m3 & 0x8) {
4431 tcg_gen_shri_i64(tmp, o->in1, pos);
4432 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4433 tcg_gen_addi_i64(o->in2, o->in2, 1);
4434 }
4435 m3 = (m3 << 1) & 0xf;
4436 pos -= 8;
4437 }
4438 break;
4439 }
4440 tcg_temp_free_i64(tmp);
4441 return DISAS_NEXT;
4442 }
4443
4444 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4445 {
4446 int r1 = get_field(s->fields, r1);
4447 int r3 = get_field(s->fields, r3);
4448 int size = s->insn->data;
4449 TCGv_i64 tsize = tcg_const_i64(size);
4450
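/* Register numbers wrap modulo 16, so e.g. r1 == 14, r3 == 1 stores
regs 14, 15, 0 and 1, in that order. */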
4451 while (1) {
4452 if (size == 8) {
4453 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4454 } else {
4455 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4456 }
4457 if (r1 == r3) {
4458 break;
4459 }
4460 tcg_gen_add_i64(o->in2, o->in2, tsize);
4461 r1 = (r1 + 1) & 15;
4462 }
4463
4464 tcg_temp_free_i64(tsize);
4465 return DISAS_NEXT;
4466 }
4467
4468 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4469 {
4470 int r1 = get_field(s->fields, r1);
4471 int r3 = get_field(s->fields, r3);
4472 TCGv_i64 t = tcg_temp_new_i64();
4473 TCGv_i64 t4 = tcg_const_i64(4);
4474 TCGv_i64 t32 = tcg_const_i64(32);
4475
4476 while (1) {
4477 tcg_gen_shl_i64(t, regs[r1], t32);
4478 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4479 if (r1 == r3) {
4480 break;
4481 }
4482 tcg_gen_add_i64(o->in2, o->in2, t4);
4483 r1 = (r1 + 1) & 15;
4484 }
4485
4486 tcg_temp_free_i64(t);
4487 tcg_temp_free_i64(t4);
4488 tcg_temp_free_i64(t32);
4489 return DISAS_NEXT;
4490 }
4491
4492 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4493 {
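/* A 16-byte store that must appear atomic to other CPUs: without host
128-bit atomics we exit to the slow path, which restarts the insn in
an exclusive (serialized) context. */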
4494 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4495 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4496 } else if (HAVE_ATOMIC128) {
4497 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4498 } else {
4499 gen_helper_exit_atomic(cpu_env);
4500 return DISAS_NORETURN;
4501 }
4502 return DISAS_NEXT;
4503 }
4504
4505 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4506 {
4507 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4508 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4509
4510 gen_helper_srst(cpu_env, r1, r2);
4511
4512 tcg_temp_free_i32(r1);
4513 tcg_temp_free_i32(r2);
4514 set_cc_static(s);
4515 return DISAS_NEXT;
4516 }
4517
4518 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4519 {
4520 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4521 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4522
4523 gen_helper_srstu(cpu_env, r1, r2);
4524
4525 tcg_temp_free_i32(r1);
4526 tcg_temp_free_i32(r2);
4527 set_cc_static(s);
4528 return DISAS_NEXT;
4529 }
4530
4531 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4532 {
4533 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4534 return DISAS_NEXT;
4535 }
4536
4537 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4538 {
4539 DisasCompare cmp;
4540 TCGv_i64 borrow;
4541
4542 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4543
4544 /* The !borrow flag is the msb of CC. Since we want the inverse of
4545 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
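/* The setcond below therefore yields 1 exactly when CC is 0 or 1,
i.e. when a borrow occurred, and that 1 is subtracted from the
difference. */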
4546 disas_jcc(s, &cmp, 8 | 4);
4547 borrow = tcg_temp_new_i64();
4548 if (cmp.is_64) {
4549 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4550 } else {
4551 TCGv_i32 t = tcg_temp_new_i32();
4552 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4553 tcg_gen_extu_i32_i64(borrow, t);
4554 tcg_temp_free_i32(t);
4555 }
4556 free_compare(&cmp);
4557
4558 tcg_gen_sub_i64(o->out, o->out, borrow);
4559 tcg_temp_free_i64(borrow);
4560 return DISAS_NEXT;
4561 }
4562
4563 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4564 {
4565 TCGv_i32 t;
4566
4567 update_psw_addr(s);
4568 update_cc_op(s);
4569
4570 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4571 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4572 tcg_temp_free_i32(t);
4573
4574 t = tcg_const_i32(s->ilen);
4575 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4576 tcg_temp_free_i32(t);
4577
4578 gen_exception(EXCP_SVC);
4579 return DISAS_NORETURN;
4580 }
4581
4582 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4583 {
4584 int cc = 0;
4585
4586 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4587 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4588 gen_op_movi_cc(s, cc);
4589 return DISAS_NEXT;
4590 }
4591
4592 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4593 {
4594 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4595 set_cc_static(s);
4596 return DISAS_NEXT;
4597 }
4598
4599 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4600 {
4601 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4602 set_cc_static(s);
4603 return DISAS_NEXT;
4604 }
4605
4606 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4607 {
4608 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4609 set_cc_static(s);
4610 return DISAS_NEXT;
4611 }
4612
4613 #ifndef CONFIG_USER_ONLY
4614
4615 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4616 {
4617 gen_helper_testblock(cc_op, cpu_env, o->in2);
4618 set_cc_static(s);
4619 return DISAS_NEXT;
4620 }
4621
4622 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4623 {
4624 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4625 set_cc_static(s);
4626 return DISAS_NEXT;
4627 }
4628
4629 #endif
4630
4631 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4632 {
4633 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4634 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4635 tcg_temp_free_i32(l1);
4636 set_cc_static(s);
4637 return DISAS_NEXT;
4638 }
4639
4640 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4641 {
4642 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4643 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4644 tcg_temp_free_i32(l);
4645 set_cc_static(s);
4646 return DISAS_NEXT;
4647 }
4648
4649 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4650 {
4651 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4652 return_low128(o->out2);
4653 set_cc_static(s);
4654 return DISAS_NEXT;
4655 }
4656
4657 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4658 {
4659 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4660 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4661 tcg_temp_free_i32(l);
4662 set_cc_static(s);
4663 return DISAS_NEXT;
4664 }
4665
4666 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4667 {
4668 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4669 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4670 tcg_temp_free_i32(l);
4671 set_cc_static(s);
4672 return DISAS_NEXT;
4673 }
4674
4675 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4676 {
4677 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4678 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4679 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4680 TCGv_i32 tst = tcg_temp_new_i32();
4681 int m3 = get_field(s->fields, m3);
4682
4683 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4684 m3 = 0;
4685 }
4686 if (m3 & 1) {
4687 tcg_gen_movi_i32(tst, -1);
4688 } else {
4689 tcg_gen_extrl_i64_i32(tst, regs[0]);
4690 if (s->insn->opc & 3) {
4691 tcg_gen_ext8u_i32(tst, tst);
4692 } else {
4693 tcg_gen_ext16u_i32(tst, tst);
4694 }
4695 }
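/* A tst of -1 can never equal a zero-extended 8- or 16-bit character,
so it effectively disables the test-character comparison. */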
4696 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4697
4698 tcg_temp_free_i32(r1);
4699 tcg_temp_free_i32(r2);
4700 tcg_temp_free_i32(sizes);
4701 tcg_temp_free_i32(tst);
4702 set_cc_static(s);
4703 return DISAS_NEXT;
4704 }
4705
4706 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4707 {
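/* TEST AND SET: atomically replace the byte with all ones; the CC is
bit 7 (the leftmost bit) of the old value, per the extract below. */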
4708 TCGv_i32 t1 = tcg_const_i32(0xff);
4709 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4710 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4711 tcg_temp_free_i32(t1);
4712 set_cc_static(s);
4713 return DISAS_NEXT;
4714 }
4715
4716 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4717 {
4718 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4719 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4720 tcg_temp_free_i32(l);
4721 return DISAS_NEXT;
4722 }
4723
4724 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4725 {
4726 int l1 = get_field(s->fields, l1) + 1;
4727 TCGv_i32 l;
4728
4729 /* The length must not exceed 32 bytes. */
4730 if (l1 > 32) {
4731 gen_program_exception(s, PGM_SPECIFICATION);
4732 return DISAS_NORETURN;
4733 }
4734 l = tcg_const_i32(l1);
4735 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4736 tcg_temp_free_i32(l);
4737 set_cc_static(s);
4738 return DISAS_NEXT;
4739 }
4740
4741 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4742 {
4743 int l1 = get_field(s->fields, l1) + 1;
4744 TCGv_i32 l;
4745
4746 /* The length must be even and must not exceed 64 bytes. */
4747 if ((l1 & 1) || (l1 > 64)) {
4748 gen_program_exception(s, PGM_SPECIFICATION);
4749 return DISAS_NORETURN;
4750 }
4751 l = tcg_const_i32(l1);
4752 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4753 tcg_temp_free_i32(l);
4754 set_cc_static(s);
4755 return DISAS_NEXT;
4756 }
4757
4759 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4760 {
4761 int d1 = get_field(s->fields, d1);
4762 int d2 = get_field(s->fields, d2);
4763 int b1 = get_field(s->fields, b1);
4764 int b2 = get_field(s->fields, b2);
4765 int l = get_field(s->fields, l1);
4766 TCGv_i32 t32;
4767
4768 o->addr1 = get_address(s, 0, b1, d1);
4769
4770 /* If the addresses are identical, this is a store/memset of zero. */
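/* E.g. the common "XC X(8),X" idiom (l == 7) clears eight bytes with
a single 64-bit store below. */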
4771 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4772 o->in2 = tcg_const_i64(0);
4773
4774 l++;
4775 while (l >= 8) {
4776 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4777 l -= 8;
4778 if (l > 0) {
4779 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4780 }
4781 }
4782 if (l >= 4) {
4783 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4784 l -= 4;
4785 if (l > 0) {
4786 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4787 }
4788 }
4789 if (l >= 2) {
4790 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4791 l -= 2;
4792 if (l > 0) {
4793 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4794 }
4795 }
4796 if (l) {
4797 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4798 }
4799 gen_op_movi_cc(s, 0);
4800 return DISAS_NEXT;
4801 }
4802
4803 /* But in general we'll defer to a helper. */
4804 o->in2 = get_address(s, 0, b2, d2);
4805 t32 = tcg_const_i32(l);
4806 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4807 tcg_temp_free_i32(t32);
4808 set_cc_static(s);
4809 return DISAS_NEXT;
4810 }
4811
4812 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4813 {
4814 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4815 return DISAS_NEXT;
4816 }
4817
4818 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4819 {
4820 int shift = s->insn->data & 0xff;
4821 int size = s->insn->data >> 8;
4822 uint64_t mask = ((1ull << size) - 1) << shift;
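/* E.g. for XIHF (assuming its data encodes size 32, shift 32) the mask
is 0xffffffff00000000ull, so the CC below reflects only the high word. */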
4823
4824 assert(!o->g_in2);
4825 tcg_gen_shli_i64(o->in2, o->in2, shift);
4826 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4827
4828 /* Produce the CC from only the bits manipulated. */
4829 tcg_gen_andi_i64(cc_dst, o->out, mask);
4830 set_cc_nz_u64(s, cc_dst);
4831 return DISAS_NEXT;
4832 }
4833
4834 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4835 {
4836 o->in1 = tcg_temp_new_i64();
4837
4838 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4839 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4840 } else {
4841 /* Perform the atomic operation in memory. */
4842 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4843 s->insn->data);
4844 }
4845
4846 /* Recompute for the atomic case as well; it is needed to set the CC. */
4847 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4848
4849 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4850 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4851 }
4852 return DISAS_NEXT;
4853 }
4854
4855 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4856 {
4857 o->out = tcg_const_i64(0);
4858 return DISAS_NEXT;
4859 }
4860
4861 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4862 {
4863 o->out = tcg_const_i64(0);
4864 o->out2 = o->out;
4865 o->g_out2 = true;
4866 return DISAS_NEXT;
4867 }
4868
4869 #ifndef CONFIG_USER_ONLY
4870 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4871 {
4872 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4873
4874 gen_helper_clp(cpu_env, r2);
4875 tcg_temp_free_i32(r2);
4876 set_cc_static(s);
4877 return DISAS_NEXT;
4878 }
4879
4880 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4881 {
4882 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4883 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4884
4885 gen_helper_pcilg(cpu_env, r1, r2);
4886 tcg_temp_free_i32(r1);
4887 tcg_temp_free_i32(r2);
4888 set_cc_static(s);
4889 return DISAS_NEXT;
4890 }
4891
4892 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4893 {
4894 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4895 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4896
4897 gen_helper_pcistg(cpu_env, r1, r2);
4898 tcg_temp_free_i32(r1);
4899 tcg_temp_free_i32(r2);
4900 set_cc_static(s);
4901 return DISAS_NEXT;
4902 }
4903
4904 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4905 {
4906 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4907 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4908
4909 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4910 tcg_temp_free_i32(ar);
4911 tcg_temp_free_i32(r1);
4912 set_cc_static(s);
4913 return DISAS_NEXT;
4914 }
4915
4916 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4917 {
4918 gen_helper_sic(cpu_env, o->in1, o->in2);
4919 return DISAS_NEXT;
4920 }
4921
4922 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4923 {
4924 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4925 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4926
4927 gen_helper_rpcit(cpu_env, r1, r2);
4928 tcg_temp_free_i32(r1);
4929 tcg_temp_free_i32(r2);
4930 set_cc_static(s);
4931 return DISAS_NEXT;
4932 }
4933
4934 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4935 {
4936 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4937 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4938 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4939
4940 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4941 tcg_temp_free_i32(ar);
4942 tcg_temp_free_i32(r1);
4943 tcg_temp_free_i32(r3);
4944 set_cc_static(s);
4945 return DISAS_NEXT;
4946 }
4947
4948 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
4949 {
4950 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4951 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4952
4953 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4954 tcg_temp_free_i32(ar);
4955 tcg_temp_free_i32(r1);
4956 set_cc_static(s);
4957 return DISAS_NEXT;
4958 }
4959 #endif
4960
4961 /* ====================================================================== */
4962 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4963 the original inputs), update the various cc data structures in order to
4964 be able to compute the new condition code. */
4965
4966 static void cout_abs32(DisasContext *s, DisasOps *o)
4967 {
4968 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4969 }
4970
4971 static void cout_abs64(DisasContext *s, DisasOps *o)
4972 {
4973 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4974 }
4975
4976 static void cout_adds32(DisasContext *s, DisasOps *o)
4977 {
4978 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4979 }
4980
4981 static void cout_adds64(DisasContext *s, DisasOps *o)
4982 {
4983 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4984 }
4985
4986 static void cout_addu32(DisasContext *s, DisasOps *o)
4987 {
4988 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4989 }
4990
4991 static void cout_addu64(DisasContext *s, DisasOps *o)
4992 {
4993 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4994 }
4995
4996 static void cout_addc32(DisasContext *s, DisasOps *o)
4997 {
4998 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4999 }
5000
5001 static void cout_addc64(DisasContext *s, DisasOps *o)
5002 {
5003 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
5004 }
5005
5006 static void cout_cmps32(DisasContext *s, DisasOps *o)
5007 {
5008 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5009 }
5010
5011 static void cout_cmps64(DisasContext *s, DisasOps *o)
5012 {
5013 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5014 }
5015
5016 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5017 {
5018 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5019 }
5020
5021 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5022 {
5023 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5024 }
5025
5026 static void cout_f32(DisasContext *s, DisasOps *o)
5027 {
5028 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5029 }
5030
5031 static void cout_f64(DisasContext *s, DisasOps *o)
5032 {
5033 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5034 }
5035
5036 static void cout_f128(DisasContext *s, DisasOps *o)
5037 {
5038 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5039 }
5040
5041 static void cout_nabs32(DisasContext *s, DisasOps *o)
5042 {
5043 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5044 }
5045
5046 static void cout_nabs64(DisasContext *s, DisasOps *o)
5047 {
5048 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5049 }
5050
5051 static void cout_neg32(DisasContext *s, DisasOps *o)
5052 {
5053 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5054 }
5055
5056 static void cout_neg64(DisasContext *s, DisasOps *o)
5057 {
5058 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5059 }
5060
5061 static void cout_nz32(DisasContext *s, DisasOps *o)
5062 {
5063 tcg_gen_ext32u_i64(cc_dst, o->out);
5064 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5065 }
5066
5067 static void cout_nz64(DisasContext *s, DisasOps *o)
5068 {
5069 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5070 }
5071
5072 static void cout_s32(DisasContext *s, DisasOps *o)
5073 {
5074 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5075 }
5076
5077 static void cout_s64(DisasContext *s, DisasOps *o)
5078 {
5079 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5080 }
5081
5082 static void cout_subs32(DisasContext *s, DisasOps *o)
5083 {
5084 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5085 }
5086
5087 static void cout_subs64(DisasContext *s, DisasOps *o)
5088 {
5089 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5090 }
5091
5092 static void cout_subu32(DisasContext *s, DisasOps *o)
5093 {
5094 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5095 }
5096
5097 static void cout_subu64(DisasContext *s, DisasOps *o)
5098 {
5099 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5100 }
5101
5102 static void cout_subb32(DisasContext *s, DisasOps *o)
5103 {
5104 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5105 }
5106
5107 static void cout_subb64(DisasContext *s, DisasOps *o)
5108 {
5109 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5110 }
5111
5112 static void cout_tm32(DisasContext *s, DisasOps *o)
5113 {
5114 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5115 }
5116
5117 static void cout_tm64(DisasContext *s, DisasOps *o)
5118 {
5119 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5120 }
5121
5122 /* ====================================================================== */
5123 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5124 with the TCG register to which we will write. Used in combination with
5125 the "wout" generators, in some cases we need a new temporary, and in
5126 some cases we can write to a TCG global. */
5127
5128 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
5129 {
5130 o->out = tcg_temp_new_i64();
5131 }
5132 #define SPEC_prep_new 0
5133
5134 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
5135 {
5136 o->out = tcg_temp_new_i64();
5137 o->out2 = tcg_temp_new_i64();
5138 }
5139 #define SPEC_prep_new_P 0
5140
5141 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5142 {
5143 o->out = regs[get_field(f, r1)];
5144 o->g_out = true;
5145 }
5146 #define SPEC_prep_r1 0
5147
5148 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
5149 {
5150 int r1 = get_field(f, r1);
5151 o->out = regs[r1];
5152 o->out2 = regs[r1 + 1];
5153 o->g_out = o->g_out2 = true;
5154 }
5155 #define SPEC_prep_r1_P SPEC_r1_even
5156
5157 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5158 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5159 {
5160 o->out = load_freg(get_field(f, r1));
5161 o->out2 = load_freg(get_field(f, r1) + 2);
5162 }
5163 #define SPEC_prep_x1 SPEC_r1_f128
5164
5165 /* ====================================================================== */
5166 /* The "Write OUTput" generators. These generally perform some non-trivial
5167 copy of data to TCG globals, or to main memory. The trivial cases are
5168 generally handled by having a "prep" generator install the TCG global
5169 as the destination of the operation. */
5170
5171 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5172 {
5173 store_reg(get_field(f, r1), o->out);
5174 }
5175 #define SPEC_wout_r1 0
5176
5177 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5178 {
5179 int r1 = get_field(f, r1);
5180 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5181 }
5182 #define SPEC_wout_r1_8 0
5183
5184 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5185 {
5186 int r1 = get_field(f, r1);
5187 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5188 }
5189 #define SPEC_wout_r1_16 0
5190
5191 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5192 {
5193 store_reg32_i64(get_field(f, r1), o->out);
5194 }
5195 #define SPEC_wout_r1_32 0
5196
5197 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
5198 {
5199 store_reg32h_i64(get_field(f, r1), o->out);
5200 }
5201 #define SPEC_wout_r1_32h 0
5202
5203 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5204 {
5205 int r1 = get_field(f, r1);
5206 store_reg32_i64(r1, o->out);
5207 store_reg32_i64(r1 + 1, o->out2);
5208 }
5209 #define SPEC_wout_r1_P32 SPEC_r1_even
5210
5211 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5212 {
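/* Split the 64-bit result across the even/odd pair: low word to
r1 + 1, high word to r1. */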
5213 int r1 = get_field(f, r1);
5214 store_reg32_i64(r1 + 1, o->out);
5215 tcg_gen_shri_i64(o->out, o->out, 32);
5216 store_reg32_i64(r1, o->out);
5217 }
5218 #define SPEC_wout_r1_D32 SPEC_r1_even
5219
5220 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5221 {
5222 int r3 = get_field(f, r3);
5223 store_reg32_i64(r3, o->out);
5224 store_reg32_i64(r3 + 1, o->out2);
5225 }
5226 #define SPEC_wout_r3_P32 SPEC_r3_even
5227
5228 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
5229 {
5230 int r3 = get_field(f, r3);
5231 store_reg(r3, o->out);
5232 store_reg(r3 + 1, o->out2);
5233 }
5234 #define SPEC_wout_r3_P64 SPEC_r3_even
5235
5236 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5237 {
5238 store_freg32_i64(get_field(f, r1), o->out);
5239 }
5240 #define SPEC_wout_e1 0
5241
5242 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5243 {
5244 store_freg(get_field(f, r1), o->out);
5245 }
5246 #define SPEC_wout_f1 0
5247
5248 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5249 {
5250 int f1 = get_field(s->fields, r1);
5251 store_freg(f1, o->out);
5252 store_freg(f1 + 2, o->out2);
5253 }
5254 #define SPEC_wout_x1 SPEC_r1_f128
5255
5256 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5257 {
5258 if (get_field(f, r1) != get_field(f, r2)) {
5259 store_reg32_i64(get_field(f, r1), o->out);
5260 }
5261 }
5262 #define SPEC_wout_cond_r1r2_32 0
5263
5264 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
5265 {
5266 if (get_field(f, r1) != get_field(f, r2)) {
5267 store_freg32_i64(get_field(f, r1), o->out);
5268 }
5269 }
5270 #define SPEC_wout_cond_e1e2 0
5271
5272 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5273 {
5274 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5275 }
5276 #define SPEC_wout_m1_8 0
5277
5278 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5279 {
5280 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5281 }
5282 #define SPEC_wout_m1_16 0
5283
5284 #ifndef CONFIG_USER_ONLY
5285 static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
5286 {
5287 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5288 }
5289 #define SPEC_wout_m1_16a 0
5290 #endif
5291
5292 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5293 {
5294 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5295 }
5296 #define SPEC_wout_m1_32 0
5297
5298 #ifndef CONFIG_USER_ONLY
5299 static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
5300 {
5301 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5302 }
5303 #define SPEC_wout_m1_32a 0
5304 #endif
5305
5306 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5307 {
5308 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5309 }
5310 #define SPEC_wout_m1_64 0
5311
5312 #ifndef CONFIG_USER_ONLY
5313 static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5314 {
5315 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5316 }
5317 #define SPEC_wout_m1_64a 0
5318 #endif
5319
5320 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5321 {
5322 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5323 }
5324 #define SPEC_wout_m2_32 0
5325
5326 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5327 {
5328 store_reg(get_field(f, r1), o->in2);
5329 }
5330 #define SPEC_wout_in2_r1 0
5331
5332 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5333 {
5334 store_reg32_i64(get_field(f, r1), o->in2);
5335 }
5336 #define SPEC_wout_in2_r1_32 0
5337
5338 /* ====================================================================== */
5339 /* The "INput 1" generators. These load the first operand to an insn. */
5340
5341 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5342 {
5343 o->in1 = load_reg(get_field(f, r1));
5344 }
5345 #define SPEC_in1_r1 0
5346
5347 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5348 {
5349 o->in1 = regs[get_field(f, r1)];
5350 o->g_in1 = true;
5351 }
5352 #define SPEC_in1_r1_o 0
5353
5354 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5355 {
5356 o->in1 = tcg_temp_new_i64();
5357 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5358 }
5359 #define SPEC_in1_r1_32s 0
5360
5361 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5362 {
5363 o->in1 = tcg_temp_new_i64();
5364 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5365 }
5366 #define SPEC_in1_r1_32u 0
5367
5368 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5369 {
5370 o->in1 = tcg_temp_new_i64();
5371 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5372 }
5373 #define SPEC_in1_r1_sr32 0
5374
5375 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5376 {
5377 o->in1 = load_reg(get_field(f, r1) + 1);
5378 }
5379 #define SPEC_in1_r1p1 SPEC_r1_even
5380
5381 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5382 {
5383 o->in1 = tcg_temp_new_i64();
5384 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5385 }
5386 #define SPEC_in1_r1p1_32s SPEC_r1_even
5387
5388 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5389 {
5390 o->in1 = tcg_temp_new_i64();
5391 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5392 }
5393 #define SPEC_in1_r1p1_32u SPEC_r1_even
5394
5395 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5396 {
5397 int r1 = get_field(f, r1);
5398 o->in1 = tcg_temp_new_i64();
5399 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5400 }
5401 #define SPEC_in1_r1_D32 SPEC_r1_even
5402
5403 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5404 {
5405 o->in1 = load_reg(get_field(f, r2));
5406 }
5407 #define SPEC_in1_r2 0
5408
5409 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5410 {
5411 o->in1 = tcg_temp_new_i64();
5412 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5413 }
5414 #define SPEC_in1_r2_sr32 0
5415
5416 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5417 {
5418 o->in1 = load_reg(get_field(f, r3));
5419 }
5420 #define SPEC_in1_r3 0
5421
5422 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5423 {
5424 o->in1 = regs[get_field(f, r3)];
5425 o->g_in1 = true;
5426 }
5427 #define SPEC_in1_r3_o 0
5428
5429 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5430 {
5431 o->in1 = tcg_temp_new_i64();
5432 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5433 }
5434 #define SPEC_in1_r3_32s 0
5435
5436 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5437 {
5438 o->in1 = tcg_temp_new_i64();
5439 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5440 }
5441 #define SPEC_in1_r3_32u 0
5442
5443 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5444 {
5445 int r3 = get_field(f, r3);
5446 o->in1 = tcg_temp_new_i64();
5447 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5448 }
5449 #define SPEC_in1_r3_D32 SPEC_r3_even
5450
5451 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5452 {
5453 o->in1 = load_freg32_i64(get_field(f, r1));
5454 }
5455 #define SPEC_in1_e1 0
5456
5457 static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5458 {
5459 o->in1 = load_freg(get_field(f, r1));
5460 }
5461 #define SPEC_in1_f1 0
5462
5463 /* Load the high double word of an extended (128-bit) format FP number */
5464 static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o)
5465 {
5466 o->in1 = load_freg(get_field(f, r2));
5467 }
5468 #define SPEC_in1_x2h SPEC_r2_f128
5469
5470 static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o)
5471 {
5472 o->in1 = load_freg(get_field(f, r3));
5473 }
5474 #define SPEC_in1_f3 0
5475
5476 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5477 {
5478 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5479 }
5480 #define SPEC_in1_la1 0
5481
5482 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5483 {
5484 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5485 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5486 }
5487 #define SPEC_in1_la2 0
5488
5489 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5490 {
5491 in1_la1(s, f, o);
5492 o->in1 = tcg_temp_new_i64();
5493 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5494 }
5495 #define SPEC_in1_m1_8u 0
5496
5497 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5498 {
5499 in1_la1(s, f, o);
5500 o->in1 = tcg_temp_new_i64();
5501 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5502 }
5503 #define SPEC_in1_m1_16s 0
5504
5505 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5506 {
5507 in1_la1(s, f, o);
5508 o->in1 = tcg_temp_new_i64();
5509 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5510 }
5511 #define SPEC_in1_m1_16u 0
5512
5513 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5514 {
5515 in1_la1(s, f, o);
5516 o->in1 = tcg_temp_new_i64();
5517 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5518 }
5519 #define SPEC_in1_m1_32s 0
5520
5521 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5522 {
5523 in1_la1(s, f, o);
5524 o->in1 = tcg_temp_new_i64();
5525 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5526 }
5527 #define SPEC_in1_m1_32u 0
5528
5529 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5530 {
5531 in1_la1(s, f, o);
5532 o->in1 = tcg_temp_new_i64();
5533 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5534 }
5535 #define SPEC_in1_m1_64 0
5536
5537 /* ====================================================================== */
5538 /* The "INput 2" generators. These load the second operand to an insn. */
5539
5540 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5541 {
5542 o->in2 = regs[get_field(f, r1)];
5543 o->g_in2 = true;
5544 }
5545 #define SPEC_in2_r1_o 0
5546
5547 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5548 {
5549 o->in2 = tcg_temp_new_i64();
5550 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5551 }
5552 #define SPEC_in2_r1_16u 0
5553
5554 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5555 {
5556 o->in2 = tcg_temp_new_i64();
5557 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5558 }
5559 #define SPEC_in2_r1_32u 0
5560
5561 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5562 {
5563 int r1 = get_field(f, r1);
5564 o->in2 = tcg_temp_new_i64();
5565 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5566 }
5567 #define SPEC_in2_r1_D32 SPEC_r1_even
5568
5569 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5570 {
5571 o->in2 = load_reg(get_field(f, r2));
5572 }
5573 #define SPEC_in2_r2 0
5574
5575 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5576 {
5577 o->in2 = regs[get_field(f, r2)];
5578 o->g_in2 = true;
5579 }
5580 #define SPEC_in2_r2_o 0
5581
5582 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5583 {
5584 int r2 = get_field(f, r2);
5585 if (r2 != 0) {
5586 o->in2 = load_reg(r2);
5587 }
5588 }
5589 #define SPEC_in2_r2_nz 0
5590
5591 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5592 {
5593 o->in2 = tcg_temp_new_i64();
5594 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5595 }
5596 #define SPEC_in2_r2_8s 0
5597
5598 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5599 {
5600 o->in2 = tcg_temp_new_i64();
5601 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5602 }
5603 #define SPEC_in2_r2_8u 0
5604
5605 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5606 {
5607 o->in2 = tcg_temp_new_i64();
5608 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5609 }
5610 #define SPEC_in2_r2_16s 0
5611
5612 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5613 {
5614 o->in2 = tcg_temp_new_i64();
5615 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5616 }
5617 #define SPEC_in2_r2_16u 0
5618
5619 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5620 {
5621 o->in2 = load_reg(get_field(f, r3));
5622 }
5623 #define SPEC_in2_r3 0
5624
5625 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5626 {
5627 o->in2 = tcg_temp_new_i64();
5628 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5629 }
5630 #define SPEC_in2_r3_sr32 0
5631
5632 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5633 {
5634 o->in2 = tcg_temp_new_i64();
5635 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5636 }
5637 #define SPEC_in2_r2_32s 0
5638
5639 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5640 {
5641 o->in2 = tcg_temp_new_i64();
5642 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5643 }
5644 #define SPEC_in2_r2_32u 0
5645
5646 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5647 {
5648 o->in2 = tcg_temp_new_i64();
5649 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5650 }
5651 #define SPEC_in2_r2_sr32 0
5652
5653 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5654 {
5655 o->in2 = load_freg32_i64(get_field(f, r2));
5656 }
5657 #define SPEC_in2_e2 0
5658
5659 static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o)
5660 {
5661 o->in2 = load_freg(get_field(f, r2));
5662 }
5663 #define SPEC_in2_f2 0
5664
5665 /* Load the low double word of an extended (128-bit) format FP number */
5666 static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o)
5667 {
5668 o->in2 = load_freg(get_field(f, r2) + 2);
5669 }
5670 #define SPEC_in2_x2l SPEC_r2_f128
5671
5672 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5673 {
5674 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5675 }
5676 #define SPEC_in2_ra2 0
5677
5678 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5679 {
5680 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5681 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5682 }
5683 #define SPEC_in2_a2 0
5684
5685 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5686 {
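/* PC-relative operand: the signed immediate counts halfwords. */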
5687 o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
5688 }
5689 #define SPEC_in2_ri2 0
5690
5691 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5692 {
5693 help_l2_shift(s, f, o, 31);
5694 }
5695 #define SPEC_in2_sh32 0
5696
5697 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5698 {
5699 help_l2_shift(s, f, o, 63);
5700 }
5701 #define SPEC_in2_sh64 0
5702
5703 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5704 {
5705 in2_a2(s, f, o);
5706 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5707 }
5708 #define SPEC_in2_m2_8u 0
5709
5710 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5711 {
5712 in2_a2(s, f, o);
5713 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5714 }
5715 #define SPEC_in2_m2_16s 0
5716
5717 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5718 {
5719 in2_a2(s, f, o);
5720 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5721 }
5722 #define SPEC_in2_m2_16u 0
5723
5724 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5725 {
5726 in2_a2(s, f, o);
5727 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5728 }
5729 #define SPEC_in2_m2_32s 0
5730
5731 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5732 {
5733 in2_a2(s, f, o);
5734 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5735 }
5736 #define SPEC_in2_m2_32u 0
5737
5738 #ifndef CONFIG_USER_ONLY
5739 static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
5740 {
5741 in2_a2(s, f, o);
5742 tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5743 }
5744 #define SPEC_in2_m2_32ua 0
5745 #endif
5746
5747 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5748 {
5749 in2_a2(s, f, o);
5750 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5751 }
5752 #define SPEC_in2_m2_64 0
5753
5754 #ifndef CONFIG_USER_ONLY
5755 static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5756 {
5757 in2_a2(s, f, o);
5758 tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
5759 }
5760 #define SPEC_in2_m2_64a 0
5761 #endif
5762
5763 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5764 {
5765 in2_ri2(s, f, o);
5766 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5767 }
5768 #define SPEC_in2_mri2_16u 0
5769
5770 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5771 {
5772 in2_ri2(s, f, o);
5773 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5774 }
5775 #define SPEC_in2_mri2_32s 0
5776
5777 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5778 {
5779 in2_ri2(s, f, o);
5780 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5781 }
5782 #define SPEC_in2_mri2_32u 0
5783
5784 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5785 {
5786 in2_ri2(s, f, o);
5787 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5788 }
5789 #define SPEC_in2_mri2_64 0
5790
5791 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5792 {
5793 o->in2 = tcg_const_i64(get_field(f, i2));
5794 }
5795 #define SPEC_in2_i2 0
5796
5797 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5798 {
5799 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5800 }
5801 #define SPEC_in2_i2_8u 0
5802
5803 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5804 {
5805 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5806 }
5807 #define SPEC_in2_i2_16u 0
5808
5809 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5810 {
5811 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5812 }
5813 #define SPEC_in2_i2_32u 0
5814
5815 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5816 {
5817 uint64_t i2 = (uint16_t)get_field(f, i2);
5818 o->in2 = tcg_const_i64(i2 << s->insn->data);
5819 }
5820 #define SPEC_in2_i2_16u_shl 0
5821
5822 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5823 {
5824 uint64_t i2 = (uint32_t)get_field(f, i2);
5825 o->in2 = tcg_const_i64(i2 << s->insn->data);
5826 }
5827 #define SPEC_in2_i2_32u_shl 0
5828
5829 #ifndef CONFIG_USER_ONLY
5830 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5831 {
5832 o->in2 = tcg_const_i64(s->fields->raw_insn);
5833 }
5834 #define SPEC_in2_insn 0
5835 #endif
5836
5837 /* ====================================================================== */
5838
5839 /* Find opc within the table of insns. This is formulated as a switch
5840 statement so that (1) we get compile-time notice of cut-paste errors
5841 for duplicated opcodes, and (2) the compiler generates the binary
5842 search tree, rather than us having to post-process the table. */
5843
5844 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5845 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5846
5847 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5848 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5849
5850 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5851 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5852
5853 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5854
5855 enum DisasInsnEnum {
5856 #include "insn-data.def"
5857 };
5858
5859 #undef E
5860 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
5861 .opc = OPC, \
5862 .flags = FL, \
5863 .fmt = FMT_##FT, \
5864 .fac = FAC_##FC, \
5865 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5866 .name = #NM, \
5867 .help_in1 = in1_##I1, \
5868 .help_in2 = in2_##I2, \
5869 .help_prep = prep_##P, \
5870 .help_wout = wout_##W, \
5871 .help_cout = cout_##CC, \
5872 .help_op = op_##OP, \
5873 .data = D \
5874 },
5875
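/* As an illustration, an insn-data.def entry of the assumed form
C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
expands to a DisasInsn wired up with in1_r1, in2_r2, prep_new,
wout_r1_32, op_add and cout_adds32. */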
5876 /* Allow 0 to be used for NULL in the table below. */
5877 #define in1_0 NULL
5878 #define in2_0 NULL
5879 #define prep_0 NULL
5880 #define wout_0 NULL
5881 #define cout_0 NULL
5882 #define op_0 NULL
5883
5884 #define SPEC_in1_0 0
5885 #define SPEC_in2_0 0
5886 #define SPEC_prep_0 0
5887 #define SPEC_wout_0 0
5888
5889 /* Give smaller names to the various facilities. */
5890 #define FAC_Z S390_FEAT_ZARCH
5891 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5892 #define FAC_DFP S390_FEAT_DFP
5893 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5894 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5895 #define FAC_EE S390_FEAT_EXECUTE_EXT
5896 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5897 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5898 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5899 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5900 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5901 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5902 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5903 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5904 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5905 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5906 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5907 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5908 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5909 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5910 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5911 #define FAC_SFLE S390_FEAT_STFLE
5912 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5913 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5914 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5915 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5916 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5917 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5918 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5919 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5920 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5921 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
5922 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5923 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5924 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5925 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
5926 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
5927 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
5928
5929 static const DisasInsn insn_info[] = {
5930 #include "insn-data.def"
5931 };
5932
5933 #undef E
5934 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
5935 case OPC: return &insn_info[insn_ ## NM];
5936
5937 static const DisasInsn *lookup_opc(uint16_t opc)
5938 {
5939 switch (opc) {
5940 #include "insn-data.def"
5941 default:
5942 return NULL;
5943 }
5944 }
5945
5946 #undef F
5947 #undef E
5948 #undef D
5949 #undef C
5950
5951 /* Extract a field from the insn. The INSN should be left-aligned in
5952 the uint64_t so that we can more easily utilize the big-bit-endian
5953 definitions we extract from the Principles of Operation. */
5954
5955 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5956 {
5957 uint32_t r, m;
5958
5959 if (f->size == 0) {
5960 return;
5961 }
5962
5963 /* Zero extract the field from the insn. */
5964 r = (insn << f->beg) >> (64 - f->size);
5965
5966 /* Sign-extend, or un-swap the field as necessary. */
5967 switch (f->type) {
5968 case 0: /* unsigned */
5969 break;
5970 case 1: /* signed */
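/*
 * Standard branch-free sign extension: with m set below to the sign
 * bit of an f->size-bit field, (r ^ m) - m propagates that bit upward.
 * Worked example for size 8: r = 0xfe, m = 0x80, so
 * (0xfe ^ 0x80) - 0x80 = 0x7e - 0x80 = -2 = 0xfffffffe.
 */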
5971 assert(f->size <= 32);
5972 m = 1u << (f->size - 1);
5973 r = (r ^ m) - m;
5974 break;
5975 case 2: /* dl+dh split, signed 20 bit. */
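/*
 * The 20-bit displacement is encoded as DL (12 bits) followed by
 * DH (8 bits), so the extracted r arrives as DL:DH.  The architected
 * value is the sign-extended DH:DL, hence the byte rotation below.
 * E.g. DL = 0x123, DH = 0xff gives r = 0x123ff and a result of
 * 0xfffff123, i.e. -0xedd.
 */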
5976 r = ((int8_t)r << 12) | (r >> 8);
5977 break;
5978 default:
5979 abort();
5980 }
5981
5982 /* Validate that the "compressed" encoding we selected above is valid,
5983 i.e. that we haven't made two different original fields overlap. */
5984 assert(((o->presentC >> f->indexC) & 1) == 0);
5985 o->presentC |= 1 << f->indexC;
5986 o->presentO |= 1 << f->indexO;
5987
5988 o->c[f->indexC] = r;
5989 }
5990
5991 /* Look up the insn at the current PC, extracting the operands into O and
5992 returning the info struct for the insn. Returns NULL for an invalid insn. */
5993
5994 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5995 DisasFields *f)
5996 {
5997 uint64_t insn, pc = s->base.pc_next;
5998 int op, op2, ilen;
5999 const DisasInsn *info;
6000
6001 if (unlikely(s->ex_value)) {
6002 /* Drop the EX data now, so that it's clear on exception paths. */
6003 TCGv_i64 zero = tcg_const_i64(0);
6004 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6005 tcg_temp_free_i64(zero);
6006
6007 /* Extract the values saved by EXECUTE. */
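/*
 * EXECUTE left-aligns the target insn in ex_value and stores the
 * instruction length in the low four bits; the two masks below
 * therefore pick apart disjoint parts of the value.
 */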
6008 insn = s->ex_value & 0xffffffffffff0000ull;
6009 ilen = s->ex_value & 0xf;
6010 op = insn >> 56;
6011 } else {
6012 insn = ld_code2(env, pc);
6013 op = (insn >> 8) & 0xff;
6014 ilen = get_ilen(op);
6015 switch (ilen) {
6016 case 2:
6017 insn = insn << 48;
6018 break;
6019 case 4:
6020 insn = ld_code4(env, pc) << 32;
6021 break;
6022 case 6:
6023 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
6024 break;
6025 default:
6026 g_assert_not_reached();
6027 }
6028 }
6029 s->pc_tmp = s->base.pc_next + ilen;
6030 s->ilen = ilen;
6031
6032 /* We can't actually determine the insn format until we've looked up
6033 the full insn opcode, which we can't do without locating the
6034 secondary opcode. Assume by default that OP2 is at bit 40; for
6035 those smaller insns that don't actually have a secondary opcode,
6036 this will correctly result in OP2 = 0. */
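/*
 * Each extraction below uses the same left-aligned bit-field idiom
 * as extract_field(): (insn << beg) >> (64 - size) isolates the
 * size-bit field that starts at big-endian bit offset beg.
 */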
6037 switch (op) {
6038 case 0x01: /* E */
6039 case 0x80: /* S */
6040 case 0x82: /* S */
6041 case 0x93: /* S */
6042 case 0xb2: /* S, RRF, RRE, IE */
6043 case 0xb3: /* RRE, RRD, RRF */
6044 case 0xb9: /* RRE, RRF */
6045 case 0xe5: /* SSE, SIL */
6046 op2 = (insn << 8) >> 56;
6047 break;
6048 case 0xa5: /* RI */
6049 case 0xa7: /* RI */
6050 case 0xc0: /* RIL */
6051 case 0xc2: /* RIL */
6052 case 0xc4: /* RIL */
6053 case 0xc6: /* RIL */
6054 case 0xc8: /* SSF */
6055 case 0xcc: /* RIL */
6056 op2 = (insn << 12) >> 60;
6057 break;
6058 case 0xc5: /* MII */
6059 case 0xc7: /* SMI */
6060 case 0xd0 ... 0xdf: /* SS */
6061 case 0xe1: /* SS */
6062 case 0xe2: /* SS */
6063 case 0xe8: /* SS */
6064 case 0xe9: /* SS */
6065 case 0xea: /* SS */
6066 case 0xee ... 0xf3: /* SS */
6067 case 0xf8 ... 0xfd: /* SS */
6068 op2 = 0;
6069 break;
6070 default:
6071 op2 = (insn << 40) >> 56;
6072 break;
6073 }
6074
6075 memset(f, 0, sizeof(*f));
6076 f->raw_insn = insn;
6077 f->op = op;
6078 f->op2 = op2;
6079
6080 /* Look up the instruction. */
6081 info = lookup_opc(op << 8 | op2);
6082
6083 /* If we found it, extract the operands. */
6084 if (info != NULL) {
6085 DisasFormat fmt = info->fmt;
6086 int i;
6087
6088 for (i = 0; i < NUM_C_FIELD; ++i) {
6089 extract_field(f, &format_info[fmt].op[i], insn);
6090 }
6091 }
6092 return info;
6093 }
6094
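/*
 * Registers f0, f2, f4 and f6 exist without the additional-floating-
 * point (AFP) facility; any odd register or one above 6 is an AFP
 * register and requires FLAG_MASK_AFP to be set.
 */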
6095 static bool is_afp_reg(int reg)
6096 {
6097 return reg % 2 || reg > 6;
6098 }
6099
6100 static bool is_fp_pair(int reg)
6101 {
6102 /* Valid pairs start at 0,1,4,5,8,9,12,13, i.e. exactly the registers
6102 with bit 1 clear; testing that single bit excludes the rest. */
6103 return !(reg & 0x2);
6104 }
6105
6106 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6107 {
6108 const DisasInsn *insn;
6109 DisasJumpType ret = DISAS_NEXT;
6110 DisasFields f;
6111 DisasOps o = {};
6112
6113 /* Search for the insn in the table. */
6114 insn = extract_insn(env, s, &f);
6115
6116 /* Not found means unimplemented/illegal opcode. */
6117 if (insn == NULL) {
6118 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6119 f.op, f.op2);
6120 gen_illegal_opcode(s);
6121 return DISAS_NORETURN;
6122 }
6123
6124 #ifndef CONFIG_USER_ONLY
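/*
 * With PER (program-event recording) active, let the helper record
 * the fetch of this instruction so it can raise an instruction-fetch
 * event if the PSW and control registers ask for one.
 */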
6125 if (s->base.tb->flags & FLAG_MASK_PER) {
6126 TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6127 gen_helper_per_ifetch(cpu_env, addr);
6128 tcg_temp_free_i64(addr);
6129 }
6130 #endif
6131
6132 /* process flags */
6133 if (insn->flags) {
6134 /* privileged instruction */
6135 if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6136 gen_program_exception(s, PGM_PRIVILEGED);
6137 return DISAS_NORETURN;
6138 }
6139
6140 /* if AFP is not enabled, instructions and registers are forbidden */
6141 if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6142 uint8_t dxc = 0;
6143
6144 if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
6145 dxc = 1;
6146 }
6147 if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
6148 dxc = 1;
6149 }
6150 if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
6151 dxc = 1;
6152 }
6153 if (insn->flags & IF_BFP) {
6154 dxc = 2;
6155 }
6156 if (insn->flags & IF_DFP) {
6157 dxc = 3;
6158 }
6159 if (dxc) {
6160 gen_data_exception(dxc);
6161 return DISAS_NORETURN;
6162 }
6163 }
6164 }
6165
6166 /* Check for insn specification exceptions. */
6167 if (insn->spec) {
6168 if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
6169 (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
6170 (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
6171 (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
6172 (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
6173 gen_program_exception(s, PGM_SPECIFICATION);
6174 return DISAS_NORETURN;
6175 }
6176 }
6177
6178 /* Set up the structures we use to communicate with the helpers. */
6179 s->insn = insn;
6180 s->fields = &f;
6181
6182 /* Implement the instruction. */
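/*
 * The table-driven decomposition runs in a fixed order: in1/in2 load
 * the input operands, prep sets up the output, op performs the
 * operation proper (and may end the TB), wout writes the result back,
 * and cout folds the result into the condition code.
 */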
6183 if (insn->help_in1) {
6184 insn->help_in1(s, &f, &o);
6185 }
6186 if (insn->help_in2) {
6187 insn->help_in2(s, &f, &o);
6188 }
6189 if (insn->help_prep) {
6190 insn->help_prep(s, &f, &o);
6191 }
6192 if (insn->help_op) {
6193 ret = insn->help_op(s, &o);
6194 }
6195 if (ret != DISAS_NORETURN) {
6196 if (insn->help_wout) {
6197 insn->help_wout(s, &f, &o);
6198 }
6199 if (insn->help_cout) {
6200 insn->help_cout(s, &o);
6201 }
6202 }
6203
6204 /* Free any temporaries created by the helpers. */
6205 if (o.out && !o.g_out) {
6206 tcg_temp_free_i64(o.out);
6207 }
6208 if (o.out2 && !o.g_out2) {
6209 tcg_temp_free_i64(o.out2);
6210 }
6211 if (o.in1 && !o.g_in1) {
6212 tcg_temp_free_i64(o.in1);
6213 }
6214 if (o.in2 && !o.g_in2) {
6215 tcg_temp_free_i64(o.in2);
6216 }
6217 if (o.addr1) {
6218 tcg_temp_free_i64(o.addr1);
6219 }
6220
6221 #ifndef CONFIG_USER_ONLY
6222 if (s->base.tb->flags & FLAG_MASK_PER) {
6223 /* An exception might be triggered; save the PSW if not already done. */
6224 if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
6225 tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6226 }
6227
6228 /* Call the helper to check for a possible PER exception. */
6229 gen_helper_per_check_exception(cpu_env);
6230 }
6231 #endif
6232
6233 /* Advance to the next instruction. */
6234 s->base.pc_next = s->pc_tmp;
6235 return ret;
6236 }
6237
6238 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6239 {
6240 DisasContext *dc = container_of(dcbase, DisasContext, base);
6241
6242 /* 31-bit mode */
6243 if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6244 dc->base.pc_first &= 0x7fffffff;
6245 dc->base.pc_next = dc->base.pc_first;
6246 }
6247
6248 dc->cc_op = CC_OP_DYNAMIC;
6249 dc->ex_value = dc->base.tb->cs_base;
6250 dc->do_debug = dc->base.singlestep_enabled;
6251 }
6252
6253 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6254 {
6255 }
6256
6257 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6258 {
6259 DisasContext *dc = container_of(dcbase, DisasContext, base);
6260
6261 tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
6262 }
6263
6264 static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
6265 const CPUBreakpoint *bp)
6266 {
6267 DisasContext *dc = container_of(dcbase, DisasContext, base);
6268
6269 dc->base.is_jmp = DISAS_PC_STALE;
6270 dc->do_debug = true;
6271 /* The address covered by the breakpoint must be included in
6272 [tb->pc, tb->pc + tb->size) in order for it to be
6273 properly cleared -- thus we increment the PC here so that
6274 the logic setting tb->size does the right thing. */
6275 dc->base.pc_next += 2;
6276 return true;
6277 }
6278
6279 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6280 {
6281 CPUS390XState *env = cs->env_ptr;
6282 DisasContext *dc = container_of(dcbase, DisasContext, base);
6283
6284 dc->base.is_jmp = translate_one(env, dc);
6285 if (dc->base.is_jmp == DISAS_NEXT) {
6286 uint64_t page_start;
6287
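/* Stop translation when the next insn would start in a new page, or
   after an EXECUTE target, so that it is translated as a TB of its own. */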
6288 page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6289 if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6290 dc->base.is_jmp = DISAS_TOO_MANY;
6291 }
6292 }
6293 }
6294
6295 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6296 {
6297 DisasContext *dc = container_of(dcbase, DisasContext, base);
6298
6299 switch (dc->base.is_jmp) {
6300 case DISAS_GOTO_TB:
6301 case DISAS_NORETURN:
6302 break;
6303 case DISAS_TOO_MANY:
6304 case DISAS_PC_STALE:
6305 case DISAS_PC_STALE_NOCHAIN:
6306 update_psw_addr(dc);
6307 /* FALLTHRU */
6308 case DISAS_PC_UPDATED:
6309 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6310 cc op type is in env */
6311 update_cc_op(dc);
6312 /* FALLTHRU */
6313 case DISAS_PC_CC_UPDATED:
6314 /* Exit the TB, either by raising a debug exception or by return. */
6315 if (dc->do_debug) {
6316 gen_exception(EXCP_DEBUG);
6317 } else if (use_exit_tb(dc) ||
6318 dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
6319 tcg_gen_exit_tb(NULL, 0);
6320 } else {
6321 tcg_gen_lookup_and_goto_ptr();
6322 }
6323 break;
6324 default:
6325 g_assert_not_reached();
6326 }
6327 }
6328
6329 static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6330 {
6331 DisasContext *dc = container_of(dcbase, DisasContext, base);
6332
6333 if (unlikely(dc->ex_value)) {
6334 /* ??? Unfortunately log_target_disas can't use host memory. */
6335 qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
6336 } else {
6337 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6338 log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6339 }
6340 }
6341
6342 static const TranslatorOps s390x_tr_ops = {
6343 .init_disas_context = s390x_tr_init_disas_context,
6344 .tb_start = s390x_tr_tb_start,
6345 .insn_start = s390x_tr_insn_start,
6346 .breakpoint_check = s390x_tr_breakpoint_check,
6347 .translate_insn = s390x_tr_translate_insn,
6348 .tb_stop = s390x_tr_tb_stop,
6349 .disas_log = s390x_tr_disas_log,
6350 };
6351
6352 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
6353 {
6354 DisasContext dc;
6355
6356 translator_loop(&s390x_tr_ops, &dc.base, cs, tb);
6357 }
6358
6359 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6360 target_ulong *data)
6361 {
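/* data[] holds the values recorded by tcg_gen_insn_start() above:
   data[0] is the PC and data[1] the cc_op at translate time. */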
6362 int cc_op = data[1];
6363 env->psw.addr = data[0];
6364 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6365 env->cc_op = cc_op;
6366 }
6367 }