]> git.proxmox.com Git - mirror_qemu.git/blob - target/s390x/translate.c
target/s390x: use "core-id" for cpu number/address/id handling
[mirror_qemu.git] / target / s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg-op.h"
37 #include "qemu/log.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40
41 /* global register indexes */
42 static TCGv_env cpu_env;
43
44 #include "exec/gen-icount.h"
45 #include "exec/helper-proto.h"
46 #include "exec/helper-gen.h"
47
48 #include "trace-tcg.h"
49 #include "exec/log.h"
50
51
52 /* Information that (most) every instruction needs to manipulate. */
53 typedef struct DisasContext DisasContext;
54 typedef struct DisasInsn DisasInsn;
55 typedef struct DisasFields DisasFields;
56
/* Per-translation state threaded through every decode/emit helper. */
struct DisasContext {
    struct TranslationBlock *tb;    /* TB being generated (flags, pc, cflags) */
    const DisasInsn *insn;          /* decode-table entry for current insn */
    DisasFields *fields;            /* decoded operand fields of current insn */
    uint64_t ex_value;              /* pending EXECUTE target insn, if any */
    uint64_t pc, next_pc;           /* current and following insn addresses */
    uint32_t ilen;                  /* length in bytes of the current insn */
    enum cc_op cc_op;               /* how the condition code is currently computed */
    bool singlestep_enabled;        /* gdbstub single-step active */
};
67
68 /* Information carried about a condition to be evaluated. */
/* Information carried about a condition to be evaluated.
   Filled in by disas_jcc() and consumed/freed via free_compare(). */
typedef struct {
    TCGCond cond:8;     /* TCG comparison to apply between a and b */
    bool is_64;         /* true: use u.s64, false: use u.s32 */
    bool g1;            /* a is a global TCG value: do not free it */
    bool g2;            /* b is a global TCG value: do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
79
80 /* is_jmp field values */
81 #define DISAS_EXCP DISAS_TARGET_0
82
83 #ifdef DEBUG_INLINE_BRANCHES
84 static uint64_t inline_branch_hit[CC_OP_MAX];
85 static uint64_t inline_branch_miss[CC_OP_MAX];
86 #endif
87
88 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
89 {
90 if (!(s->tb->flags & FLAG_MASK_64)) {
91 if (s->tb->flags & FLAG_MASK_32) {
92 return pc | 0x80000000;
93 }
94 }
95 return pc;
96 }
97
98 static TCGv_i64 psw_addr;
99 static TCGv_i64 psw_mask;
100 static TCGv_i64 gbea;
101
102 static TCGv_i32 cc_op;
103 static TCGv_i64 cc_src;
104 static TCGv_i64 cc_dst;
105 static TCGv_i64 cc_vr;
106
107 static char cpu_reg_names[32][4];
108 static TCGv_i64 regs[16];
109 static TCGv_i64 fregs[16];
110
/* One-time setup: create the TCG globals that mirror the CPUS390XState
   fields (PSW, breaking-event address, cc tracking, GPRs and FPRs). */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* cpu_reg_names[0..15] hold "r0".."r15", [16..31] hold "f0".."f15";
       the TCG core keeps only a pointer, hence the static backing array. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* FP register f<i> aliases the high doubleword of vector register i. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
150
151 static TCGv_i64 load_reg(int reg)
152 {
153 TCGv_i64 r = tcg_temp_new_i64();
154 tcg_gen_mov_i64(r, regs[reg]);
155 return r;
156 }
157
/* Return a fresh temp with the 32-bit (short) float of FPR @reg,
   i.e. the high half of the register shifted into the low 32 bits. */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}
164
/* Store the full 64-bit value @v into GPR @reg. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
169
/* Store the 64-bit (long) float value @v into FPR @reg. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
174
/* Store the low 32 bits of @v into the low half of GPR @reg. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
180
/* Store the low 32 bits of @v into the HIGH half of GPR @reg;
   the low half is preserved. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
185
/* Store a short (32-bit) float into FPR @reg: it lives in the high
   half of the register; the low half is preserved. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
190
/* Fetch the low doubleword of a 128-bit helper result, which helpers
   leave in env->retxl (the high part is the helper's return value). */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
195
/* Synchronize the architectural PSW address with the current insn pc. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
201
/* Record a taken branch for PER (Program Event Recording).
   @to_next: branch target is the sequential next insn (branch-to-next). */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    /* gbea always tracks the address of the last breaking-event insn. */
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
216
/* Record a conditional branch for PER: the branch is considered taken
   when (arg1 cond arg2) holds. With PER enabled, call the helper only
   on the taken path; otherwise just update gbea conditionally. */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the PER helper when the branch is NOT taken. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        /* No PER: just keep gbea current on the taken path. */
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
236
/* Note an unconditional breaking event at the current insn (for PER). */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}
241
/* Spill the compile-time cc_op into env->cc_op if it carries information
   (DYNAMIC means env is already current; STATIC means cc_op is the value). */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
248
/* Before an operation that may fault, make psw.addr and cc_op
   architecturally visible so the exception state is consistent. */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
254
/* Fetch a 2-byte instruction halfword from the guest code stream. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
259
/* Fetch 4 bytes from the guest code stream, zero-extended to 64 bits. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
264
/* Map the PSW address-space-control bits to a softmmu MMU index:
   0 = primary, 1 = secondary, 2 = home space. */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return 0;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return 1;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return 2;
    default:
        /* Access-register mode is not handled here. */
        tcg_abort();
        break;
    }
}
279
/* Emit a call that raises exception @excp; does not return to the TB. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
286
/* Raise a program exception with interruption code @code, recording the
   code and instruction length in env for the exception handler. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}
309
/* Raise an operation (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
314
/* Raise a data program exception with DXC 0xff (as used for
   floating-point traps); the DXC is kept in the fpc byte 2. */
static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}
328
#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception if running in problem state. */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & FLAG_MASK_PSTATE) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
337
/* Compute the effective address base(b2) + index(x2) + displacement(d2)
   into a fresh temp, truncated to 31 bits when not in 64-bit mode.
   A register number of 0 means "no register" per the architecture. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed. If we crop negative
       displacements early we create larger immediate addends. */

    /* Note that addi optimizes the imm==0 case. */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        /* Constant address: the 31-bit crop can be folded in now. */
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
367
368 static inline bool live_cc_data(DisasContext *s)
369 {
370 return (s->cc_op != CC_OP_DYNAMIC
371 && s->cc_op != CC_OP_STATIC
372 && s->cc_op > 3);
373 }
374
/* Set the condition code to the compile-time constant @val (0..3). */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        /* The old cc inputs are dead; tell TCG so it can drop them. */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
384
/* Defer cc computation @op with one operand (stored in cc_dst). */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
394
/* Defer cc computation @op with two operands (cc_src, cc_dst). */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
405
/* Defer cc computation @op with three operands (cc_src, cc_dst, cc_vr). */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
414
/* cc reflects whether the 64-bit value @val is zero/nonzero. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
419
/* cc from a 32-bit float result (zero / negative / positive / NaN). */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
424
/* cc from a 64-bit float result (zero / negative / positive / NaN). */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
429
/* cc from a 128-bit float result held in the pair (vh, vl). */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
434
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        /* The deferred inputs are dead once the cc value is materialized. */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
445
/* calculates cc into cc_op */
/* Materialize the deferred condition code: call the calc_cc helper with
   however many of (cc_src, cc_dst, cc_vr) the current cc_op consumes,
   then mark the cc as STATIC.  Constant cc ops are emitted directly. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First switch: decide which auxiliary temps are needed. */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed for constants / already-computed cc. */
        break;
    }

    /* Second switch: emit the computation itself. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
553
554 static bool use_exit_tb(DisasContext *s)
555 {
556 return (s->singlestep_enabled ||
557 (s->tb->cflags & CF_LAST_IO) ||
558 (s->tb->flags & FLAG_MASK_PER));
559 }
560
/* May we chain directly to @dest with goto_tb?  In softmmu the target
   must share a page with the TB start or the current pc so the mapping
   is known to be stable; in user mode any target is fine. */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
573
/* Debug statistics: count a branch we could NOT inline for @cc_op. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
580
/* Debug statistics: count a branch successfully inlined for @cc_op. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
587
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   Indexed by the 4-bit branch mask; entries come in pairs because the
   low mask bit (CC=3) is a don't-care. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
600
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.
   Indexed by the 4-bit branch mask; the two low mask bits (CC=2, CC=3)
   are don't-cares, hence the groups of four identical entries. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
613
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.
   When the cc producer is known at translate time the comparison is
   inlined on the producer's operands; otherwise the cc value is
   materialized and the mask is tested against it. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* mask == 15 is "always", mask == 0 is "never"; no operands needed. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Test under mask: compare (operand & mask) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already materialized in cc_op; pick the
           cheapest comparison that implements the 4-bit mask. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
904
905 static void free_compare(DisasCompare *c)
906 {
907 if (!c->g1) {
908 if (c->is_64) {
909 tcg_temp_free_i64(c->u.s64.a);
910 } else {
911 tcg_temp_free_i32(c->u.s32.a);
912 }
913 }
914 if (!c->g2) {
915 if (c->is_64) {
916 tcg_temp_free_i64(c->u.s64.b);
917 } else {
918 tcg_temp_free_i32(c->u.s32.b);
919 }
920 }
921 }
922
923 /* ====================================================================== */
924 /* Define the insn format enumeration. */
925 #define F0(N) FMT_##N,
926 #define F1(N, X1) F0(N)
927 #define F2(N, X1, X2) F0(N)
928 #define F3(N, X1, X2, X3) F0(N)
929 #define F4(N, X1, X2, X3, X4) F0(N)
930 #define F5(N, X1, X2, X3, X4, X5) F0(N)
931
932 typedef enum {
933 #include "insn-format.def"
934 } DisasFormat;
935
936 #undef F0
937 #undef F1
938 #undef F2
939 #undef F3
940 #undef F4
941 #undef F5
942
943 /* Define a structure to hold the decoded fields. We'll store each inside
944 an array indexed by an enum. In order to conserve memory, we'll arrange
945 for fields that do not exist at the same time to overlap, thus the "C"
946 for compact. For checking purposes there is an "O" for original index
947 as well that will be applied to availability bitmaps. */
948
949 enum DisasFieldIndexO {
950 FLD_O_r1,
951 FLD_O_r2,
952 FLD_O_r3,
953 FLD_O_m1,
954 FLD_O_m3,
955 FLD_O_m4,
956 FLD_O_b1,
957 FLD_O_b2,
958 FLD_O_b4,
959 FLD_O_d1,
960 FLD_O_d2,
961 FLD_O_d4,
962 FLD_O_x2,
963 FLD_O_l1,
964 FLD_O_l2,
965 FLD_O_i1,
966 FLD_O_i2,
967 FLD_O_i3,
968 FLD_O_i4,
969 FLD_O_i5
970 };
971
972 enum DisasFieldIndexC {
973 FLD_C_r1 = 0,
974 FLD_C_m1 = 0,
975 FLD_C_b1 = 0,
976 FLD_C_i1 = 0,
977
978 FLD_C_r2 = 1,
979 FLD_C_b2 = 1,
980 FLD_C_i2 = 1,
981
982 FLD_C_r3 = 2,
983 FLD_C_m3 = 2,
984 FLD_C_i3 = 2,
985
986 FLD_C_m4 = 3,
987 FLD_C_b4 = 3,
988 FLD_C_i4 = 3,
989 FLD_C_l1 = 3,
990
991 FLD_C_i5 = 4,
992 FLD_C_d1 = 4,
993
994 FLD_C_d2 = 5,
995
996 FLD_C_d4 = 6,
997 FLD_C_x2 = 6,
998 FLD_C_l2 = 6,
999
1000 NUM_C_FIELD = 7
1001 };
1002
/* Decoded operand fields of one instruction; see the enums above for
   the compact (C) vs original (O) index spaces. */
struct DisasFields {
    uint64_t raw_insn;      /* the raw instruction bytes, left-aligned */
    unsigned op:8;          /* primary opcode byte */
    unsigned op2:8;         /* secondary opcode byte, when the format has one */
    unsigned presentC:16;   /* bitmap of populated compact slots */
    unsigned int presentO;  /* bitmap of populated original field indices */
    int c[NUM_C_FIELD];     /* field values, indexed by DisasFieldIndexC */
};
1011
1012 /* This is the way fields are to be accessed out of DisasFields. */
1013 #define have_field(S, F) have_field1((S), FLD_O_##F)
1014 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1015
1016 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1017 {
1018 return (f->presentO >> c) & 1;
1019 }
1020
/* Fetch field @o (stored in compact slot @c), asserting it is present. */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1027
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field within the insn */
    unsigned int size:8;        /* width in bits */
    unsigned int type:2;        /* extraction type: 0=unsigned, 1=signed, 2=long-disp */
    unsigned int indexC:6;      /* compact storage slot (DisasFieldIndexC) */
    enum DisasFieldIndexO indexO:8;  /* original field identity */
} DisasField;

/* Per-format list of field descriptors, generated from insn-format.def. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1040
1041 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1042 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1043 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1044 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1045 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1046 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1047 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1048 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1049 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1050 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1051 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1052 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1053 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1054 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1055
1056 #define F0(N) { { } },
1057 #define F1(N, X1) { { X1 } },
1058 #define F2(N, X1, X2) { { X1, X2 } },
1059 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1060 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1061 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1062
1063 static const DisasFormatInfo format_info[] = {
1064 #include "insn-format.def"
1065 };
1066
1067 #undef F0
1068 #undef F1
1069 #undef F2
1070 #undef F3
1071 #undef F4
1072 #undef F5
1073 #undef R
1074 #undef M
1075 #undef BD
1076 #undef BXD
1077 #undef BDL
1078 #undef BXDL
1079 #undef I
1080 #undef L
1081
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;   /* value is a global: do not free */
    TCGv_i64 out, out2, in1, in2;       /* operand values */
    TCGv_i64 addr1;                     /* effective address for memory ops */
} DisasOps;
1090
1091 /* Instructions can place constraints on their operands, raising specification
1092 exceptions if they are violated. To make this easy to automate, each "in1",
1093 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1094 of the following, or 0. To make this easy to document, we'll put the
1095 SPEC_<name> defines next to <name>. */
1096
1097 #define SPEC_r1_even 1
1098 #define SPEC_r2_even 2
1099 #define SPEC_r3_even 4
1100 #define SPEC_r1_f128 8
1101 #define SPEC_r2_f128 16
1102
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We have updated the PC and CC values.  */
    EXIT_PC_CC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are exiting the TB to the main loop.  */
    EXIT_PC_STALE_NOCHAIN,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
1124
/* One entry of the instruction decode table: opcode, format, facility
   gate, operand constraints, and the pipeline of emit callbacks. */
struct DisasInsn {
    unsigned opc:16;        /* opcode (primary + secondary bytes) */
    DisasFormat fmt:8;      /* instruction format */
    unsigned fac:8;         /* required facility */
    unsigned spec:8;        /* SPEC_* operand constraint bits */

    const char *name;       /* mnemonic, for logging */

    /* Emit pipeline: load inputs, prepare output, run the op,
       write outputs, then compute the condition code. */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* opaque per-insn data for the callbacks */
};
1142
1143 /* ====================================================================== */
1144 /* Miscellaneous helpers, used by several operations. */
1145
/* Load the shift count for a shift instruction into o->in2:
   base register + displacement, masked to the architectural limit
   (@mask is 0x3f for 64-bit shifts, 0x1f for 32-bit shifts). */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        /* No base register: the count is a constant. */
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
1159
/* Emit an unconditional direct branch to @dest, choosing between
   fall-through (branch-to-next), chained goto_tb, and a plain PC update. */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        /* Branch to the following insn: no control transfer needed. */
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}
1179
/* Emit a conditional branch described by C.  IS_IMM selects a
   PC-relative target (s->pc + 2*IMM) versus a register target CDEST.
   Chooses between dual goto_tb, single goto_tb + indirect, and a fully
   indirect movcond form depending on what use_goto_tb permits.
   Always consumes (frees) *c. */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result so movcond can select
               the 64-bit destination. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1310
1311 /* ====================================================================== */
1312 /* The operations. These perform the bulk of the work for any insn,
1313 usually after the operands have been loaded and output initialized. */
1314
1315 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1316 {
1317 TCGv_i64 z, n;
1318 z = tcg_const_i64(0);
1319 n = tcg_temp_new_i64();
1320 tcg_gen_neg_i64(n, o->in2);
1321 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1322 tcg_temp_free_i64(n);
1323 tcg_temp_free_i64(z);
1324 return NO_EXIT;
1325 }
1326
/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value. */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
1332
/* LOAD POSITIVE (long BFP): clear the sign bit of the 64-bit value. */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
1338
/* LOAD POSITIVE (extended BFP): sign bit lives in the high half (in1);
   the low half (in2) is copied through unchanged. */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
1345
/* Integer ADD: out = in1 + in2.  CC is computed separately by the
   insn's cc_op annotation. */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1351
/* ADD WITH CARRY: out = in1 + in2 + carry-in, where carry-in is
   recovered from the current CC state. */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire. */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the 0/1 result to 64 bits. */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
1378
/* ADD (short BFP): delegate to the softfloat helper. */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1384
/* ADD (long BFP): delegate to the softfloat helper. */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
1390
/* ADD (extended BFP): 128-bit operands are passed as two 64-bit halves;
   the helper returns the low half via the retxl side channel. */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
1397
/* Bitwise AND: out = in1 & in2. */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1403
/* AND IMMEDIATE (NIHH et al): the insn data encodes the field position
   (low byte) and width (high byte).  Bits outside the field are forced
   to 1 in the mask operand so only the selected field is affected. */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place; it must not be a global. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1420
/* BRANCH AND SAVE (BAS/BASR): store the link information for the next
   insn into R1, then branch to the address in in2 — unless the target
   register operand was 0, in which case no branch is taken. */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
1432
/* BRANCH RELATIVE AND SAVE: store link info into R1, then branch to the
   PC-relative destination (I2 is a halfword count). */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
1438
/* BRANCH ON CONDITION (BC/BCR/BRC...): translate the M1 mask into a
   DisasCompare and hand off to help_branch.  The BCR R2=0 forms are
   no-ops, except that masks 14/15 act as serialization points. */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1464
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of R1 and branch
   if the result is non-zero. */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write back only the low 32 bits of R1. */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1488
/* BRANCH RELATIVE ON COUNT HIGH: decrement the high 32 bits of R1 and
   branch if the result is non-zero.  Target is always immediate. */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    /* Operate on the high word of R1, then store it back there. */
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1512
/* BRANCH ON COUNT (64-bit): decrement R1 in place and branch if the
   result is non-zero.  R1 is a global, hence g1 = true. */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1531
/* BRANCH ON INDEX (32-bit, BXH/BXLE): add R3 to R1, compare the sum
   against the odd register of the R3 pair; insn data selects the
   high (GT) or low-or-equal (LE) form. */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* The comparand is the odd register of the R3 pair (R3 if odd). */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1557
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): as op_bx32, on full registers.
   When R1 aliases the comparand register, snapshot its value before
   R1 is updated by the addition. */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* R1 is about to be clobbered; copy the comparand first. */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1583
/* COMPARE AND BRANCH (CRJ/CGRJ/CIJ...): compare in1 with in2 using the
   relation selected by M3 (unsigned when insn data is set), then branch
   either PC-relative (I4) or to the computed address B4+D4. */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Indirect form: o->out carries the branch target address. */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1609
/* COMPARE (short BFP): the helper computes CC directly. */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1616
/* COMPARE (long BFP): the helper computes CC directly. */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1623
/* COMPARE (extended BFP): both 128-bit operands as two halves each. */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1630
/* CONVERT TO FIXED (32-bit from short BFP), rounding mode in M3. */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1639
/* CONVERT TO FIXED (32-bit from long BFP), rounding mode in M3. */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1648
/* CONVERT TO FIXED (32-bit from extended BFP), rounding mode in M3. */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1657
/* CONVERT TO FIXED (64-bit from short BFP), rounding mode in M3. */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1666
/* CONVERT TO FIXED (64-bit from long BFP), rounding mode in M3. */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1675
/* CONVERT TO FIXED (64-bit from extended BFP), rounding mode in M3. */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1684
/* CONVERT TO LOGICAL (32-bit from short BFP), rounding mode in M3. */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1693
/* CONVERT TO LOGICAL (32-bit from long BFP), rounding mode in M3. */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1702
/* CONVERT TO LOGICAL (32-bit from extended BFP), rounding mode in M3. */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1711
/* CONVERT TO LOGICAL (64-bit from short BFP), rounding mode in M3. */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
1720
/* CONVERT TO LOGICAL (64-bit from long BFP), rounding mode in M3. */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
1729
/* CONVERT TO LOGICAL (64-bit from extended BFP), rounding mode in M3. */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1738
/* CONVERT FROM FIXED (short BFP from 64-bit), rounding mode in M3.
   No CC is set for conversions from fixed. */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1746
/* CONVERT FROM FIXED (long BFP from 64-bit), rounding mode in M3. */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1754
/* CONVERT FROM FIXED (extended BFP from 64-bit), rounding mode in M3.
   The low 64 bits of the result arrive via the retxl side channel. */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1763
/* CONVERT FROM LOGICAL (short BFP from 64-bit), rounding mode in M3. */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1771
/* CONVERT FROM LOGICAL (long BFP from 64-bit), rounding mode in M3. */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1779
/* CONVERT FROM LOGICAL (extended BFP from 64-bit), rounding mode in M3.
   Low 64 bits of the 128-bit result come via the retxl side channel. */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1788
/* CHECKSUM: the helper computes the checksum and the number of bytes
   consumed; afterwards the R2 address/length pair is advanced by that
   count, as the architecture requires. */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    /* The checksum itself is returned through the retxl side channel. */
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1804
/* COMPARE LOGICAL (CLC): for power-of-two lengths up to 8 bytes, load
   both operands inline and set CC via the unsigned-compare cc_op;
   any other length falls back to the byte-loop helper. */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    /* L is the length minus one, hence the +1 below. */
    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1837
1838 static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
1839 {
1840 int r1 = get_field(s->fields, r1);
1841 int r2 = get_field(s->fields, r2);
1842 TCGv_i32 t1, t2;
1843
1844 /* r1 and r2 must be even. */
1845 if (r1 & 1 || r2 & 1) {
1846 gen_program_exception(s, PGM_SPECIFICATION);
1847 return EXIT_NORETURN;
1848 }
1849
1850 t1 = tcg_const_i32(r1);
1851 t2 = tcg_const_i32(r2);
1852 gen_helper_clcl(cc_op, cpu_env, t1, t2);
1853 tcg_temp_free_i32(t1);
1854 tcg_temp_free_i32(t2);
1855 set_cc_static(s);
1856 return NO_EXIT;
1857 }
1858
1859 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1860 {
1861 int r1 = get_field(s->fields, r1);
1862 int r3 = get_field(s->fields, r3);
1863 TCGv_i32 t1, t3;
1864
1865 /* r1 and r3 must be even. */
1866 if (r1 & 1 || r3 & 1) {
1867 gen_program_exception(s, PGM_SPECIFICATION);
1868 return EXIT_NORETURN;
1869 }
1870
1871 t1 = tcg_const_i32(r1);
1872 t3 = tcg_const_i32(r3);
1873 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
1874 tcg_temp_free_i32(t1);
1875 tcg_temp_free_i32(t3);
1876 set_cc_static(s);
1877 return NO_EXIT;
1878 }
1879
/* COMPARE LOGICAL LONG UNICODE: same shape as CLCLE, but the helper
   compares double-byte characters. */
static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
1900
/* COMPARE LOGICAL CHARACTERS UNDER MASK: the helper compares the bytes
   of in1 selected by M3 against successive bytes at address in2. */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1912
/* COMPARE LOGICAL STRING: terminator character in R0.  The helper
   returns the updated first-operand address in o->in1 and the second
   via the retxl side channel. */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1920
1921 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1922 {
1923 TCGv_i64 t = tcg_temp_new_i64();
1924 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1925 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1926 tcg_gen_or_i64(o->out, o->out, t);
1927 tcg_temp_free_i64(t);
1928 return NO_EXIT;
1929 }
1930
/* COMPARE AND SWAP (CS/CSG): single atomic cmpxchg; CC is 0 on match,
   1 on mismatch. */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}
1955
/* COMPARE DOUBLE AND SWAP (CDSG): the 128-bit cmpxchg on the even-odd
   register pairs is done entirely in the helper. */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
1977
/* COMPARE AND SWAP AND STORE: helper performs the combined operation;
   in1/in2 carry the two operand addresses. */
static ExitStatus op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
1989
1990 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): atomic cmpxchg at the
   (size-aligned) address in R2; on a successful compare with the low
   bit of R2 set, all TLBs are purged. */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask off the low address bits used as the purge flag. */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal? */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return NO_EXIT;
}
2034 #endif
2035
/* CONVERT TO DECIMAL: helper packs the 32-bit value from in1 into a
   decimal doubleword, which is then stored at address in2. */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2047
/* COMPARE AND TRAP: branch around the trap when the inverted M3
   relation holds (unsigned relation when insn data is set);
   otherwise raise the trap. */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
2066
/* CONVERT UTF (CU12/CU14/CU21/CU24/CU41/CU42): dispatch to the proper
   helper based on the insn data encoding the source/target widths.
   M3 is honored only with the ETF3-enhancement facility. */
static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return NO_EXIT;
}
2116
2117 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): PSW address and CC must be up to date before
   calling the helper, since it may inspect or alter machine state. */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2135 #endif
2136
/* DIVIDE (signed 32-bit): remainder in out2, quotient via retxl. */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2143
/* DIVIDE LOGICAL (unsigned 32-bit): remainder in out2, quotient via
   retxl. */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2150
/* DIVIDE (signed 64-bit): remainder in out2, quotient via retxl. */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2157
/* DIVIDE LOGICAL (unsigned 128/64): the dividend is the out:out2 pair;
   remainder in out2, quotient via retxl. */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
2164
/* DIVIDE (short BFP): delegate to the softfloat helper. */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2170
/* DIVIDE (long BFP): delegate to the softfloat helper. */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
2176
/* DIVIDE (extended BFP): 128-bit operands as half pairs; low result
   half via the retxl side channel. */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2183
/* EXTRACT ACCESS (EAR): read access register R2 into the output. */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
2190
/* EXTRACT CPU ATTRIBUTE (ECAG): we report no cache information, so the
   result is all-ones. */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
2197
/* EXTRACT FPC: read the floating-point control register. */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2203
/* EXTRACT PSW: store the high half of the PSW mask into R1 and, if R2
   is non-zero, the low half into R2. */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
2221
/* EXECUTE / EXECUTE RELATIVE: run the target insn, modified by the low
   byte of R1 (or zero when R1 is 0).  PSW and CC must be written back
   first since the helper re-enters the translator. */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return EXIT_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return EXIT_PC_CC_UPDATED;
}
2253
/* LOAD FP INTEGER (short BFP): round to integer, mode in M3. */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2261
/* LOAD FP INTEGER (long BFP): round to integer, mode in M3. */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2269
/* LOAD FP INTEGER (extended BFP): round to integer, mode in M3; the
   low result half comes via the retxl side channel. */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2278
/* FIND LEFTMOST ONE: R1 = bit position of the leftmost one (64 when the
   input is zero), R1+1 = input with that bit cleared. */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2298
2299 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2300 {
2301 int m3 = get_field(s->fields, m3);
2302 int pos, len, base = s->insn->data;
2303 TCGv_i64 tmp = tcg_temp_new_i64();
2304 uint64_t ccm;
2305
2306 switch (m3) {
2307 case 0xf:
2308 /* Effectively a 32-bit load. */
2309 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2310 len = 32;
2311 goto one_insert;
2312
2313 case 0xc:
2314 case 0x6:
2315 case 0x3:
2316 /* Effectively a 16-bit load. */
2317 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2318 len = 16;
2319 goto one_insert;
2320
2321 case 0x8:
2322 case 0x4:
2323 case 0x2:
2324 case 0x1:
2325 /* Effectively an 8-bit load. */
2326 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2327 len = 8;
2328 goto one_insert;
2329
2330 one_insert:
2331 pos = base + ctz32(m3) * 8;
2332 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2333 ccm = ((1ull << len) - 1) << pos;
2334 break;
2335
2336 default:
2337 /* This is going to be a sequence of loads and inserts. */
2338 pos = base + 32 - 8;
2339 ccm = 0;
2340 while (m3) {
2341 if (m3 & 0x8) {
2342 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2343 tcg_gen_addi_i64(o->in2, o->in2, 1);
2344 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2345 ccm |= 0xff << pos;
2346 }
2347 m3 = (m3 << 1) & 0xf;
2348 pos -= 8;
2349 }
2350 break;
2351 }
2352
2353 tcg_gen_movi_i64(tmp, ccm);
2354 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2355 tcg_temp_free_i64(tmp);
2356 return NO_EXIT;
2357 }
2358
/* Insert immediate: deposit in2 into in1 at the bit position and width
   packed into insn->data (low byte = shift, remaining bits = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2366
/* INSERT PROGRAM MASK: place the PSW program mask and the condition
   code into bits 24-31 of the output; other bits are preserved.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    /* Make the current CC value available in cc_op.  */
    gen_op_calc_cc(s);
    /* Clear the byte that will receive the program mask and CC.  */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Shift the program-mask field of psw_mask down into bits 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Insert the condition code (0-3) into bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2385
2386 #ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY: the m4 field is only honored when the
   local-TLB-clearing facility is available; otherwise pass 0.  */
static ExitStatus op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

/* INVALIDATE PAGE TABLE ENTRY: same m4 handling as IDTE above.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED: the key lookup lives in the helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2423 #endif
2424
/* BFP compare-and-signal, 32-bit: CC is produced by the keb helper.  */
static ExitStatus op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* BFP compare-and-signal, 64-bit.  */
static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* BFP compare-and-signal, 128-bit: each operand is a register pair
   (out/out2 and in1/in2).  */
static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2445
/* LOAD AND ADD: atomically add in1 to the memory word at in2.  */
static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD AND AND: atomically and in1 into the memory word at in2.  */
static ExitStatus op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       the fetched value lands in o->in2.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD AND OR: atomically or in1 into the memory word at in2.  */
static ExitStatus op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       the fetched value lands in o->in2.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD AND EXCLUSIVE OR: atomically xor in1 into the word at in2.  */
static ExitStatus op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       the fetched value lands in o->in2.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2489
/* Lengthen 32-bit BFP to 64-bit.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Round 64-bit BFP to 32-bit.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Round 128-bit BFP (in1/in2 pair) to 64-bit.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Round 128-bit BFP (in1/in2 pair) to 32-bit.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Lengthen 64-bit BFP to 128-bit; low half comes via return_low128.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Lengthen 32-bit BFP to 128-bit; low half comes via return_low128.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2527
/* LOAD 31-bit: keep only the low 31 bits of in2.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
2533
/* Memory loads of 8/16/32/64 bits, sign- or zero-extended to 64 bits.
   in2 holds the effective address; out receives the value.  */

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2575
/* Load-and-trap family: perform the load/extract, then raise a trap
   if the resulting value is zero.  */

static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* Variant targeting the high 32 bits of R1.  */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* 31-bit variant: mask rather than load.  */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
2630
/* Load on condition: out = (m3 condition holds) ? in2 : in1,
   implemented as a movcond so no branch is generated.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize the condition as 0/1, widen
           it, and select against zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2660
2661 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL: load control registers r1..r3 from memory at in2
   (32-bit form); privileged.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL, 64-bit form.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2683
/* LOAD REAL ADDRESS: address translation is done in the helper,
   which also sets the CC.  Privileged.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2691
/* LOAD PROGRAM PARAMETER: store in2 into env->pp.  Privileged.  */
static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return NO_EXIT;
}
2699
/* LOAD PSW: load an 8-byte (short-format) PSW from memory and install
   it via the load_psw helper.  Control flow does not continue here.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* Read mask and address as two 32-bit words.  */
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2719
/* LOAD PSW EXTENDED: load a 16-byte PSW (mask + address, each 64 bits)
   and install it.  Control flow does not continue here.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2737 #endif
2738
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2748
/* LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping mod 16) from
   consecutive words at in2.  The first and last registers are loaded
   first so that any page fault happens before any register is
   modified.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
2794
/* LOAD MULTIPLE HIGH: like op_lm32, but targets the high 32 bits of
   each register.  First/last loaded first to front-load page faults.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
2840
/* LOAD MULTIPLE (64-bit): load registers r1..r3 (wrapping mod 16) from
   consecutive doublewords at in2; first/last loaded first to front-load
   page faults.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
2881
/* LOAD PAIR DISJOINT: load two operands from two distinct addresses.
   Interlocked access is only claimed when the loads can actually be
   serialized, hence the EXCP_ATOMIC escape under MTTCG.  */
static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (parallel_cpus) {
        potential_page_fault(s);
        gen_exception(EXCP_ATOMIC);
        return EXIT_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
2906
/* LOAD PAIR FROM QUADWORD: 16-byte load via helper; the second half
   of the result is retrieved with return_low128.  */
static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
{
    gen_helper_lpq(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2913
2914 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit): bypasses DAT; privileged.  */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD USING REAL ADDRESS (64-bit).  */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2930 #endif
2931
/* Load and zero rightmost byte: clear the low 8 bits of in2.  */
static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return NO_EXIT;
}
2937
/* Move in2 into out by stealing the temporary rather than copying;
   in2 is marked unused so it is not double-freed.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
2946
/* Like op_mov2, but additionally set access register 1 according to
   the current address-space-control mode from tb->flags.  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    /* Steal in2 as the output, as in op_mov2.  */
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In access-register via secondary mode, copy the base
           register's access register; AR0 reads as 0 here.  */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
2981
/* Move the in1/in2 pair into out/out2 by stealing both temporaries.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2993
/* MOVE (character): storage-to-storage copy of l1+1 bytes via helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE INVERSE: as MVC, but the helper copies bytes in reverse.  */
static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3009
/* MOVE LONG: both operands are even/odd register pairs, so r1 and r2
   must be even; otherwise raise a specification exception.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}
3030
/* MOVE LONG EXTENDED: register pairs r1 and r3 must be even; CC comes
   from the helper.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
3051
/* MOVE LONG UNICODE: same shape as MVCLE; r1 and r3 must be even.  */
static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
3072
/* MOVE WITH OPTIONAL SPECIFICATIONS: regs[r3] carries the operand
   specifications; CC comes from the helper.  */
static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
3080
3081 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: the register holding key and length is encoded in
   the l1 field.  Privileged; CC from the helper.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: mirror of MVCP above.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3099 #endif
3100
/* MOVE NUMERICS: storage-to-storage digit move via helper.  */
static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE WITH OFFSET: storage-to-storage move via helper.  */
static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3116
/* MOVE PAGE: regs[0] supplies the operand specification; CC from
   the helper.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3123
/* MOVE STRING: regs[0] supplies the ending character; the helper
   returns the updated addresses (second via return_low128).  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3131
/* MOVE ZONES: storage-to-storage zone move via helper.  */
static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3139
/* Integer multiply, single-width result.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Integer multiply, full 128-bit result in the out/out2 pair.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}
3151
/* BFP multiply, 32-bit.  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiply, 32-bit operands widening to 64-bit.  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiply, 64-bit.  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiply, 128-bit (register pairs); low half via return_low128.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* BFP multiply, 64-bit operand widening to 128-bit.  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3183
/* BFP multiply-and-add, 32-bit: the r3 operand is loaded as a float32
   held in an i64 temp.  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* BFP multiply-and-add, 64-bit: r3 is passed directly from fregs.  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* BFP multiply-and-subtract, 32-bit.  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* BFP multiply-and-subtract, 64-bit.  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
3213
/* Load negative: out = -|in2|, i.e. negate only when in2 >= 0.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
3225
/* Float negative-absolute: force the sign bit on (32-bit format).  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* Float negative-absolute, 64-bit format.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* Float negative-absolute, 128-bit: sign bit lives in the high half
   (in1); the low half (in2) is copied unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
3244
/* AND (character): storage-to-storage AND via helper; CC from result.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3253
/* Integer negate.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Float negate: flip the sign bit (32-bit format).  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* Float negate, 64-bit format.  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* Float negate, 128-bit: sign bit is in the high half (in1); the low
   half (in2) is copied unchanged.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
3278
/* OR (character): storage-to-storage OR via helper; CC from result.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3287
/* Bitwise OR of the two register operands.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3293
/* OR immediate into a sub-field: insn->data packs the shift (low byte)
   and size of the field; CC reflects only the bits touched.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must be a local temp.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3309
/* PACK: decimal pack via helper, length from the l1 field.  */
static ExitStatus op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3317
/* PACK ASCII: second-operand length is l2+1 and may not exceed 32
   bytes, else specification exception.  */
static ExitStatus op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3333
/* PACK UNICODE: second-operand length is l2+1, must be even and at
   most 64 bytes, else specification exception.  */
static ExitStatus op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3349
/* POPULATION COUNT: computed entirely in the helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
3355
3356 #ifndef CONFIG_USER_ONLY
/* PURGE TLB.  Privileged.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
3363 #endif
3364
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate R2
   left by I5, then insert the bit range I3..I4 into R1, optionally
   zeroing the remaining bits.  Specialized into extract/deposit forms
   where the mask shapes allow.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return NO_EXIT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask both halves and merge.  */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
3457
/* ROTATE THEN {AND,OR,XOR} SELECTED BITS: rotate R2 left by I5, apply
   the boolean op (selected by op2) over the bit range I3..I4 of R1,
   and set the CC from the affected bits.  Bit 0x80 of I3 requests the
   test-only (result-discarding) form.  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3510
/* Byte-reverse the low 16 bits of the operand (LRVH and friends).  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse the low 32 bits of the operand (LRV/LRVR).  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-reverse the full 64-bit operand (LRVG/LRVGR).  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (32-bit, RLL): rotate the low 32 bits of
   in1 by in2 and zero-extend the result into the 64-bit output.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (64-bit, RLLG).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3549
#ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED (RRBE): done in a helper which also
   computes the CC from the old storage-key reference/change bits.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL FAST (SACF).  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return EXIT_PC_STALE;
}
#endif
3567
/* SET ADDRESSING MODE (SAM24/SAM31/SAM64): insn->data selects the new
   mode (0 = 24-bit, 1 = 31-bit, other = 64-bit).  */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    /* MASK is the set of address bits valid in the target mode.  */
    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    /* Install the new mode into the PSW addressing-mode bits.  */
    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return EXIT_PC_STALE;
}

/* SET ACCESS (SAR): store the low 32 bits of the operand into access
   register R1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}

/* SUBTRACT (short BFP).  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (long BFP).  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (extended BFP); the 128-bit result comes back in out/out2.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT (short BFP).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (long BFP).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (extended BFP).  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3647
#ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): implemented entirely in the helper.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR (SIGP): order/status handling done in the helper.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
3669
/* STORE ON CONDITION (STOC/STOCG/STOCFH): conditionally store R1 (or its
   high half) based on the M3 condition mask.  insn->data selects the
   variant.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high 32 bits of the register.  */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
3714
/* SHIFT LEFT SINGLE (arithmetic, SLA/SLAG): insn->data is the sign-bit
   position (31 or 63) and selects the CC computation.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SET FPC: install a new floating-point-control register value.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: like SET FPC but may raise a simulated IEEE
   exception; handled in the helper.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3758
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT): deposit the new rounding-mode
   value into the appropriate FPC field, then reinstall the FPC so the
   softfloat status picks up the change.  The op2 byte selects which FPC
   field (BFP vs DFP) and how many bits are significant.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        /* No base register: the displacement itself is the value.  */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3798
3799 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS (SPKA): bits 56-59 of the operand address
   become the PSW access key.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED (SSKE).  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK (SSM): replace PSW bits 0-7 with the operand byte.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}

/* STORE CPU ADDRESS (STAP): the CPU address is the core id.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return NO_EXIT;
}

/* STORE CLOCK (STCK).  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED (STCKE): store a 16-byte clock value.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* SET CLOCK COMPARATOR (SCKC).  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR (STCKC).  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit, STCTG): store control registers R1..R3.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit, STCTL).  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CPU ID (STIDP): store the 8-byte CPU id to the operand address,
   which must be doubleword aligned.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    return NO_EXIT;
}

/* SET CPU TIMER (SPT).  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST (STFL): stores to low memory; done in a helper.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return NO_EXIT;
}

/* STORE CPU TIMER (STPT).  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE SYSTEM INFORMATION (STSI): function code and selectors come
   implicitly from GR0/GR1.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX (SPX).  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
3940
/* The channel-subsystem instructions below all take the subchannel id
   implicitly in GR1 and are implemented entirely in helpers, which set
   the CC.  */

/* CANCEL SUBCHANNEL (XSCH).  */
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* CLEAR SUBCHANNEL (CSCH).  */
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* HALT SUBCHANNEL (HSCH).  */
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* MODIFY SUBCHANNEL (MSCH): o->in2 is the SCHIB address.  */
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESET CHANNEL PATH (RCHP).  */
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESUME SUBCHANNEL (RSCH).  */
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* START SUBCHANNEL (SSCH): o->in2 is the ORB address.  */
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STORE SUBCHANNEL (STSCH): o->in2 is the SCHIB address.  */
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST SUBCHANNEL (TSCH): o->in2 is the IRB address.  */
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* CHANNEL SUBSYSTEM CALL (CHSC): o->in2 is the command block address.  */
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STORE PREFIX (STPX): the low bits are always zero in the result.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}

/* STORE THEN AND/OR SYSTEM MASK (STNSM/STOSM): store the current system
   mask byte, then AND (op 0xac) or OR it with the immediate.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}

/* STORE USING REAL ADDRESS (STURA, 32-bit).  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (STURG, 64-bit).  */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
4080 #endif
4081
/* STORE FACILITY LIST EXTENDED (STFLE): available in problem state,
   hence outside the CONFIG_USER_ONLY guard.  */
static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Simple stores of 1, 2, 4 and 8 bytes of in1 to the address in in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* STORE ACCESS MULTIPLE (STAM): store access registers R1..R3.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
4123
/* STORE CHARACTERS UNDER MASK (STCM/STCMH/STCMY): store the bytes of R1
   selected by the M3 mask to successive storage locations.  insn->data
   is the bit position of the register word being operated on (0 for the
   low word, 32 for the high word).  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte within the register.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
4172
/* STORE MULTIPLE (STM/STMG): store registers R1..R3 (wrapping modulo 16)
   to successive storage locations; insn->data is the element size.  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}

/* STORE MULTIPLE HIGH (STMH): store the high word of registers R1..R3.  */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* Note that tcg_gen_qemu_st32 stores the LOW half of t, so the
           high word is moved down by shifting left by 32 first -- this
           relies on the big-endian store taking the low 32 bits.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}

/* STORE PAIR TO QUADWORD (STPQ).  */
static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
{
    gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    return NO_EXIT;
}
4226
/* SEARCH STRING (SRST): helper updates R1/R2 and the CC.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SEARCH STRING UNICODE (SRSTU).  */
static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SUBTRACT: CC is computed by the cout hook from in1/in2/out.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW (SLB/SLBG): subtract in2 and the borrow implied
   by the current CC.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
4284
/* SUPERVISOR CALL (SVC): record the SVC code and instruction length for
   the interrupt handler, then raise the exception.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Make PSW address and cc_op up to date before leaving the TB.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
4303
4304 static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4305 {
4306 int cc = 0;
4307
4308 cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4309 cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4310 gen_op_movi_cc(s, cc);
4311 return NO_EXIT;
4312 }
4313
/* TEST DATA CLASS (short BFP, TCEB): helper sets the CC.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (long BFP, TCDB).  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (extended BFP, TCXB): 128-bit value in out/out2.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY

/* TEST BLOCK (TB): clear a 4K block and set the CC; helper does it all.  */
static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST PROTECTION (TPROT).  NOTE(review): no check_privileged here,
   unlike the neighboring system instructions -- presumably intentional
   or enforced elsewhere; confirm against the instruction tables.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

#endif
4353
/* TEST DECIMAL (TP): validate a packed-decimal field of L1+1 bytes.  */
static ExitStatus op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE (TR): translate bytes through the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE EXTENDED (TRE): 128-bit register pair result in out/out2.  */
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST (TRT).  */
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST REVERSE (TRTR).  */
static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low two bits
   of the opcode select source/destination element sizes.  The test byte
   comes from GR0 unless suppressed via the M3 ETF2-enhancement bit.  */
static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* Test-character comparison disabled: use a value that can
           never match.  */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return NO_EXIT;
}
4428
/* TEST AND SET (TS): atomically exchange the byte with 0xff; the CC is
   the old leftmost bit.  */
static ExitStatus op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK (UNPK).  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* UNPACK ASCII (UNPKA).  */
static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK UNICODE (UNPKU).  */
static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
4480
4481
/* EXCLUSIVE OR CHARACTER (XC): XOR two storage fields of L1+1 bytes.
   The common self-XOR (memset-to-zero) case is inlined; everything else
   goes to the helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* Emit the widest stores possible for the remaining length.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* A zeroed field always yields CC 0.  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
4534
/* EXCLUSIVE OR (register/storage forms).  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* EXCLUSIVE OR IMMEDIATE (XIHF/XILF etc.): insn->data packs the target
   field's size (high byte) and bit offset (low byte).  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* Output hooks producing a constant zero (used e.g. for clearing).  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    /* Both halves of the pair alias the same zero constant; mark out2 as
       global so it is not freed twice.  */
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
4570
4571 /* ====================================================================== */
4572 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4573 the original inputs), update the various cc data structures in order to
4574 be able to compute the new condition code. */
4575
/* Each cout_* routine records the CC operation and its operands so the
   condition code can be computed lazily later; the suffix names the
   operation and operand width.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits of the output participate in the CC.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}
4716
4717 static void cout_subb64(DisasContext *s, DisasOps *o)
4718 {
4719 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4720 }
4721
4722 static void cout_tm32(DisasContext *s, DisasOps *o)
4723 {
4724 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4725 }
4726
4727 static void cout_tm64(DisasContext *s, DisasOps *o)
4728 {
4729 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4730 }
4731
4732 /* ====================================================================== */
4733 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4734 with the TCG register to which we will write. Used in combination with
4735 the "wout" generators, in some cases we need a new temporary, and in
4736 some cases we can write to a TCG global. */
4737
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Destination is the even/odd GPR pair r1:r1+1 (spec-checked even). */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Destination is the float-128 pair fregs[r1], fregs[r1 + 2];
   SPEC_r1_f128 enforces r1 <= 13 so r1 + 2 stays in range. */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
4782
4783 /* ====================================================================== */
4784 /* The "Write OUTput" generators. These generally perform some non-trivial
4785 copy of data to TCG globals, or to main memory. The trivial cases are
4786 generally handled by having a "prep" generator install the TCG global
4787 as the destination of the operation. */
4788
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1. */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1. */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd pair: low half to r1+1,
   high half to r1.  Note o->out is shifted in place. */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

/* NOTE: reads r1 through s->fields rather than F like its siblings;
   both refer to the same structure when invoked from translate_one. */
static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Store only when r1 and r2 differ (skip a no-op register move). */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* As above, for short-float registers. */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store to the address computed into in2 (not addr1). */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
4931
4932 /* ====================================================================== */
4933 /* The "INput 1" generators. These load the first operand to an insn. */
4934
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Use the GPR global directly (no copy); g_in1 prevents freeing it. */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down into the low half. */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value assembled from the 32-bit halves of the pair r1:r1+1. */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* NOTE(review): installs the f128 pair into o->out/o->out2 rather than
   o->in1/o->in2, mirroring prep_x1 -- presumably the x1 operand is read
   in place through OUT by the ops that use it.  Confirm against those
   ops before relying on this. */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Effective address from the first-operand fields (b1, d1). */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Effective address from the second-operand fields (x2, b2, d2),
   placed in addr1. */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5134
5135 /* ====================================================================== */
5136 /* The "INput 2" generators. These load the second operand to an insn. */
5137
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* 64-bit value assembled from the 32-bit halves of the pair r1:r1+1. */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Leave in2 unset when r2 is 0; translate_one only frees used temps. */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* f128 second operand: fills BOTH in1 and in2 with the register pair. */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Address taken from register r2 as base, no index or displacement. */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: current insn address + i2 halfwords. */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift operand for 32-bit shifts; help_l2_shift is given limit 31. */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

/* Shift operand for 64-bit shifts; help_l2_shift is given limit 63. */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* Memory loads via the PC-relative address from in2_ri2. */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* 16-bit immediate zero-extended, then shifted left by insn->data. */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* 32-bit immediate zero-extended, then shifted left by insn->data. */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction word itself as an operand. */
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
5419
5420 /* ====================================================================== */
5421
5422 /* Find opc within the table of insns. This is formulated as a switch
5423 statement so that (1) we get compile-time notice of cut-paste errors
5424 for duplicated opcodes, and (2) the compiler generates the binary
5425 search tree, rather than us having to post-process the table. */
5426
5427 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5428 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5429
5430 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5431
5432 enum DisasInsnEnum {
5433 #include "insn-data.def"
5434 };
5435
5436 #undef D
5437 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5438 .opc = OPC, \
5439 .fmt = FMT_##FT, \
5440 .fac = FAC_##FC, \
5441 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5442 .name = #NM, \
5443 .help_in1 = in1_##I1, \
5444 .help_in2 = in2_##I2, \
5445 .help_prep = prep_##P, \
5446 .help_wout = wout_##W, \
5447 .help_cout = cout_##CC, \
5448 .help_op = op_##OP, \
5449 .data = D \
5450 },
5451
5452 /* Allow 0 to be used for NULL in the table below. */
5453 #define in1_0 NULL
5454 #define in2_0 NULL
5455 #define prep_0 NULL
5456 #define wout_0 NULL
5457 #define cout_0 NULL
5458 #define op_0 NULL
5459
5460 #define SPEC_in1_0 0
5461 #define SPEC_in2_0 0
5462 #define SPEC_prep_0 0
5463 #define SPEC_wout_0 0
5464
5465 /* Give smaller names to the various facilities. */
5466 #define FAC_Z S390_FEAT_ZARCH
5467 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5468 #define FAC_DFP S390_FEAT_DFP
5469 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5470 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5471 #define FAC_EE S390_FEAT_EXECUTE_EXT
5472 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5473 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5474 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5475 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5476 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5477 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5478 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5479 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5480 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5481 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5482 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5483 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5484 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5485 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5486 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5487 #define FAC_SFLE S390_FEAT_STFLE
5488 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5489 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5490 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5491 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5492 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5493 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5494 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5495 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5496 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5497
5498 static const DisasInsn insn_info[] = {
5499 #include "insn-data.def"
5500 };
5501
5502 #undef D
5503 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5504 case OPC: return &insn_info[insn_ ## NM];
5505
/* Map a 16-bit (major << 8 | minor) opcode to its DisasInsn descriptor,
   or NULL if unimplemented.  The switch body is generated from
   insn-data.def via the D() macro defined just above. */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}
5514
5515 #undef D
5516 #undef C
5517
/* Extract a field from the insn. The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation. */
5521
5522 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5523 {
5524 uint32_t r, m;
5525
5526 if (f->size == 0) {
5527 return;
5528 }
5529
5530 /* Zero extract the field from the insn. */
5531 r = (insn << f->beg) >> (64 - f->size);
5532
5533 /* Sign-extend, or un-swap the field as necessary. */
5534 switch (f->type) {
5535 case 0: /* unsigned */
5536 break;
5537 case 1: /* signed */
5538 assert(f->size <= 32);
5539 m = 1u << (f->size - 1);
5540 r = (r ^ m) - m;
5541 break;
5542 case 2: /* dl+dh split, signed 20 bit. */
5543 r = ((int8_t)r << 12) | (r >> 8);
5544 break;
5545 default:
5546 abort();
5547 }
5548
5549 /* Validate that the "compressed" encoding we selected above is valid.
5550 I.e. we havn't make two different original fields overlap. */
5551 assert(((o->presentC >> f->indexC) & 1) == 0);
5552 o->presentC |= 1 << f->indexC;
5553 o->presentO |= 1 << f->indexO;
5554
5555 o->c[f->indexC] = r;
5556 }
5557
5558 /* Lookup the insn at the current PC, extracting the operands into O and
5559 returning the info struct for the insn. Returns NULL for invalid insn. */
5560
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths. */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE: the insn bits in the high
           6 bytes, the insn length in the low nibble. */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        /* Read the first halfword; its top byte determines the length,
           then left-align the full insn in the 64-bit word. */
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->next_pc = s->pc + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode. Which we can't do without locating the
       secondary opcode. Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
5661
/* Translate a single target instruction at s->pc into TCG ops.
   Decodes the insn, checks specification exceptions, drives the
   per-insn helper pipeline (in1/in2/prep/op/wout/cout), frees any
   temporaries the helpers created, handles PER tracing for system
   emulation, and advances s->pc.  Returns the ExitStatus produced
   by the operation helper (NO_EXIT to keep translating). */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* PER (Program Event Recording) is enabled: record the fetch
           address before the insn executes.  */
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions.  Each SPEC_* bit in
       insn->spec encodes a static constraint on the register fields;
       violating any of them raises PGM_SPECIFICATION.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* r1/r2/r3 must designate the even register of a pair.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* 128-bit FP register pairs: the first register must be
           one of 0,1,4,5,8,9,12,13, hence r > 13 is rejected here;
           the odd-numbered companions are presumably excluded by
           the field encoding — TODO confirm against insn-data.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  The stages run in a fixed order:
       load inputs, prepare outputs, perform the operation, write
       back outputs, then compute the condition code.  Any stage an
       insn does not need has a NULL hook.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags
       mark TCG globals (e.g. the register file), which must not be
       freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc. */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
5794
/* Translate a basic block of guest code starting at tb->pc into a TCG
   translation block.  Repeatedly calls translate_one() until an insn
   ends the block, a page boundary is crossed, the op buffer fills, the
   insn budget is exhausted, or single-stepping forces a stop; then
   emits the appropriate TB epilogue for the final ExitStatus.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int num_insns, max_insns;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode: addresses use only the low 31 bits. */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    /* Non-zero cs_base means we are translating the target of an
       EXECUTE insn rather than memory at pc.  */
    dc.ex_value = tb->cs_base;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    /* Never translate across a page boundary: the next page may not
       be mapped or may be modified independently.  */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    do {
        /* Record pc and cc_op so restore_state_to_opc() can rebuild
           the CPU state for this insn after an exception.  */
        tcg_gen_insn_start(dc.pc, dc.cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
            status = EXIT_PC_STALE;
            do_debug = true;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc.pc += 2;
            break;
        }

        /* With icount, I/O is only permitted on the last insn of a TB. */
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        status = translate_one(env, &dc);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled
                || dc.ex_value)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue matching how translation stopped.  The
       cases deliberately fall through: a stale pc needs psw_addr
       written back, an updated pc still needs cc_op saved, and all
       non-goto_tb exits share the final exit sequence.  */
    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
    case EXIT_PC_STALE_NOCHAIN:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* FALLTHRU */
    case EXIT_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(0);
        } else {
            /* Chain to the next TB via the indirect lookup helper. */
            tcg_gen_lookup_and_goto_ptr(psw_addr);
        }
        break;
    default:
        g_assert_not_reached();
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc.pc - pc_start;
    tb->icount = num_insns;

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        if (unlikely(dc.ex_value)) {
            /* ??? Unfortunately log_target_disas can't use host memory.  */
            qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
        } else {
            qemu_log("IN: %s\n", lookup_symbol(pc_start));
            log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
            qemu_log("\n");
        }
        qemu_log_unlock();
    }
#endif
}
5917
5918 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5919 target_ulong *data)
5920 {
5921 int cc_op = data[1];
5922 env->psw.addr = data[0];
5923 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5924 env->cc_op = cc_op;
5925 }
5926 }