]> git.proxmox.com Git - mirror_qemu.git/blob - target-s390x/translate.c
cf65f01f60626403b841fb2288fe40674794f707
[mirror_qemu.git] / target-s390x / translate.c
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
/* global register indexes */
/* Pointer to the CPU state; base for all tcg_global_mem_new_* offsets. */
static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
43
44
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;   /* TB currently being translated */
    const DisasInsn *insn;         /* decode-table entry for the current insn */
    DisasFields *fields;           /* decoded operand fields of the insn */
    uint64_t pc, next_pc;          /* guest address of current / following insn */
    enum cc_op cc_op;              /* symbolic (lazy) condition-code state */
    bool singlestep_enabled;
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;      /* true: operands are in u.s64; false: u.s32 */
    bool g1;         /* 'a' operand is a global temp; do not free it */
    bool g2;         /* 'b' operand is a global temp; do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#define DISAS_EXCP 4

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches we could / could not inline. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
77
78 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
79 {
80 if (!(s->tb->flags & FLAG_MASK_64)) {
81 if (s->tb->flags & FLAG_MASK_32) {
82 return pc | 0x80000000;
83 }
84 }
85 return pc;
86 }
87
88 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
89 int flags)
90 {
91 S390CPU *cpu = S390_CPU(cs);
92 CPUS390XState *env = &cpu->env;
93 int i;
94
95 if (env->cc_op > 3) {
96 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
97 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
98 } else {
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
100 env->psw.mask, env->psw.addr, env->cc_op);
101 }
102
103 for (i = 0; i < 16; i++) {
104 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
105 if ((i % 4) == 3) {
106 cpu_fprintf(f, "\n");
107 } else {
108 cpu_fprintf(f, " ");
109 }
110 }
111
112 for (i = 0; i < 16; i++) {
113 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
114 if ((i % 4) == 3) {
115 cpu_fprintf(f, "\n");
116 } else {
117 cpu_fprintf(f, " ");
118 }
119 }
120
121 #ifndef CONFIG_USER_ONLY
122 for (i = 0; i < 16; i++) {
123 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
124 if ((i % 4) == 3) {
125 cpu_fprintf(f, "\n");
126 } else {
127 cpu_fprintf(f, " ");
128 }
129 }
130 #endif
131
132 #ifdef DEBUG_INLINE_BRANCHES
133 for (i = 0; i < CC_OP_MAX; i++) {
134 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
135 inline_branch_miss[i], inline_branch_hit[i]);
136 }
137 #endif
138
139 cpu_fprintf(f, "\n");
140 }
141
/* TCG globals mirroring fields of CPUS390XState (created in
   s390x_translate_init below). */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* Lazy condition-code state: operation selector plus up to three inputs. */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Backing storage for register names: "r0".."r15" then "f0".."f15". */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* Per-opcode-buffer-slot saved cc_op, for restoring state at a given PC. */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
155
/* One-time TCG setup: create the global temps declared above, binding
   each to its offset within CPUS390XState. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* General registers r0..r15; names live in cpu_reg_names[0..15]. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* Floating point registers f0..f15; names in cpu_reg_names[16..31]. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }
}
191
/* Return a fresh temp holding a copy of general register REG. */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temp with the 32-bit (short) float of FPR REG, i.e. the
   high half of the 64-bit register shifted down into bits 0-31. */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

/* Store V into general register REG (full 64 bits). */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into floating point register REG (full 64 bits). */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of register REG. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store a short float (low 32 bits of V) into the high half of FPR REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low half of a 128-bit helper result from env->retxl. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
236
/* Write the current translation PC back to the guest PSW address. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Materialize s->cc_op into the cc_op global, unless the state is
   DYNAMIC (unknown) or STATIC (env->cc_op already holds the value). */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Called before operations that may fault, so the exception path sees
   an up-to-date PSW address and cc state. */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
255
/* Fetch 2 bytes of instruction text at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch 4 bytes of instruction text at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Fetch a 6-byte instruction: first halfword in bits 47-32, following
   word in bits 31-0. */
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}
270
/* Map the PSW address-space-control bits (cached in tb->flags) to a
   QEMU MMU index.  Any other ASC value aborts translation. */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        tcg_abort();
        break;
    }
}
285
/* Emit a call to the exception helper with exception number EXCP. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
292
/* Raise program interrupt CODE: record the interrupt code and the
   instruction length, advance the PSW past the insn, flush cc state,
   and emit the exception call. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length is the distance to the next insn. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}
316
/* Raise a program interrupt for an unrecognized opcode.
   NOTE(review): an illegal opcode would normally raise PGM_OPERATION;
   confirm that PGM_SPECIFICATION is intended here. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

/* Raise a privileged-operation program interrupt if the cached PSW
   problem-state bit is set. */
static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
328
329 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
330 {
331 TCGv_i64 tmp = tcg_temp_new_i64();
332 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
333
334 /* Note that d2 is limited to 20 bits, signed. If we crop negative
335 displacements early we create larger immedate addends. */
336
337 /* Note that addi optimizes the imm==0 case. */
338 if (b2 && x2) {
339 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
340 tcg_gen_addi_i64(tmp, tmp, d2);
341 } else if (b2) {
342 tcg_gen_addi_i64(tmp, regs[b2], d2);
343 } else if (x2) {
344 tcg_gen_addi_i64(tmp, regs[x2], d2);
345 } else {
346 if (need_31) {
347 d2 &= 0x7fffffff;
348 need_31 = false;
349 }
350 tcg_gen_movi_i64(tmp, d2);
351 }
352 if (need_31) {
353 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
354 }
355
356 return tmp;
357 }
358
/* True if cc_src/cc_dst/cc_vr currently hold data that a later cc
   computation would consume; cc_op values 0-3 are the constant CCs
   (CC_OP_CONST0..CONST3) and carry no data. */
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

/* Set the condition code to the compile-time constant VAL (0-3). */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        /* The previous cc inputs are dead; let TCG drop them. */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
375
/* Lazy cc update with one input: record OP and its operand in cc_dst;
   the cc is only computed if actually consumed later. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Lazy cc update with two inputs (cc_src, cc_dst). */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Lazy cc update with three inputs (cc_src, cc_dst, cc_vr). */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
405
/* Convenience wrappers: set the cc from the (non-)zero-ness of VAL,
   interpreted per the given CC_OP flavor. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* 128-bit float: high and low halves as the two cc inputs. */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
425
/* CC value is in env->cc_op (e.g. just written by a helper); mark the
   lazy state accordingly and drop any stale cc inputs. */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
436
437 /* calculates cc into cc_op */
/* calculates cc into cc_op */
/* Force the lazy cc state to a concrete value: dispatch to the calc_cc
   helper with however many of cc_src/cc_dst/cc_vr the current cc_op
   consumes, then mark the state STATIC.  The first switch only decides
   which arguments are live; the second emits the helper call. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    switch (s->cc_op) {
    default:
        /* 0- and 1-argument ops need a placeholder for unused slots. */
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed (constant / already-computed / env). */
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
544
545 static int use_goto_tb(DisasContext *s, uint64_t dest)
546 {
547 /* NOTE: we handle the case where the TB spans two pages here */
548 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
549 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
550 && !s->singlestep_enabled
551 && !(s->tb->cflags & CF_LAST_IO));
552 }
553
/* Statistics stubs: count branches that could not / could be inlined.
   Compiled to no-ops unless DEBUG_INLINE_BRANCHES is defined. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
567
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; each pair of entries differs only in the (ignored) CC=3 bit. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT, TCG_COND_GT,           /*    |    | GT | x */
    TCG_COND_LT, TCG_COND_LT,           /*    | LT |    | x */
    TCG_COND_NE, TCG_COND_NE,           /*    | LT | GT | x */
    TCG_COND_EQ, TCG_COND_EQ,           /* EQ |    |    | x */
    TCG_COND_GE, TCG_COND_GE,           /* EQ |    | GT | x */
    TCG_COND_LE, TCG_COND_LE,           /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,   /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,           /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,           /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,   /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
593
594 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
595 details required to generate a TCG comparison. */
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Two phases: first pick
   the TCG condition the (mask, cc_op) pair encodes, falling back to a
   full cc computation when no direct mapping exists; then load the
   comparison operands into C. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Unconditional (mask 15) and never (mask 0) need no operands;
       point both at the cc_op global so free_compare frees nothing. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table, but with unsigned comparison codes. */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (value & mask) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value itself is in cc_op; decode the mask against it. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
884
885 static void free_compare(DisasCompare *c)
886 {
887 if (!c->g1) {
888 if (c->is_64) {
889 tcg_temp_free_i64(c->u.s64.a);
890 } else {
891 tcg_temp_free_i32(c->u.s32.a);
892 }
893 }
894 if (!c->g2) {
895 if (c->is_64) {
896 tcg_temp_free_i64(c->u.s64.b);
897 } else {
898 tcg_temp_free_i32(c->u.s32.b);
899 }
900 }
901 }
902
/* ====================================================================== */
/* Define the insn format enumeration.  The F0..F5 macros expand each
   entry of insn-format.def into a FMT_<name> enumerator; the field
   arguments are ignored here (they are reused below for format_info). */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
922
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps. */

/* "Original" field indexes: one per distinct field name (r1..i5). */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
951
/* "Compact" field indexes: mutually-exclusive fields share a slot, so
   all decoded fields of one insn fit in NUM_C_FIELD ints. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
982
struct DisasFields {
    unsigned op:8;           /* primary opcode byte */
    unsigned op2:8;          /* secondary opcode byte, if any */
    unsigned presentC:16;    /* bitmap of present compact-index slots */
    unsigned int presentO;   /* bitmap of present original-index fields */
    int c[NUM_C_FIELD];      /* the decoded field values */
};

/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* True if original-index field C was decoded for this insn. */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Return field value at compact slot C; asserts the field is present. */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1006
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;          /* first bit of the field in the insn */
    unsigned int size:8;         /* width in bits */
    unsigned int type:2;         /* 0=unsigned, 1=signed, 2=long-disp (per I/BDL) */
    unsigned int indexC:6;       /* compact storage slot */
    enum DisasFieldIndexO indexO:8;  /* original field index */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

/* Field constructors used by insn-format.def: register, mask,
   base+displacement (short and 20-bit long, with/without index),
   immediate, and length fields. */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* One entry per format, indexed by DisasFormat. */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1060
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;  /* flag temps that are globals: do not free */
    TCGv_i64 out, out2, in1, in2;      /* operand values */
    TCGv_i64 addr1;                    /* computed effective address, if any */
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>. */

/* Bit flags combined into DisasInsn.spec. */
#define SPEC_r1_even 1
#define SPEC_r2_even 2
#define SPEC_r3_even 4
#define SPEC_r1_f128 8
#define SPEC_r2_f128 16
1081
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
1099
/* Architectural facility an instruction belongs to; used to gate
   decoding on CPU model capabilities. */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1122
/* One entry of the insn decode table: opcode, format, required facility,
   operand constraints, and the pipeline of helpers used to translate it. */
struct DisasInsn {
    unsigned opc:16;         /* opcode (primary, plus secondary if any) */
    DisasFormat fmt:8;       /* operand encoding format */
    DisasFacility fac:8;     /* facility gating availability */
    unsigned spec:8;         /* SPEC_* operand constraint flags */

    const char *name;

    /* Translation pipeline: load inputs, prepare output, emit the op,
       write outputs, then update the cc. */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;           /* per-insn constant passed through to helpers */
};
1140
1141 /* ====================================================================== */
1142 /* Miscellaneous helpers, used by several operations. */
1143
1144 static void help_l2_shift(DisasContext *s, DisasFields *f,
1145 DisasOps *o, int mask)
1146 {
1147 int b2 = get_field(f, b2);
1148 int d2 = get_field(f, d2);
1149
1150 if (b2 == 0) {
1151 o->in2 = tcg_const_i64(d2 & mask);
1152 } else {
1153 o->in2 = get_address(s, 0, b2, d2);
1154 tcg_gen_andi_i64(o->in2, o->in2, mask);
1155 }
1156 }
1157
1158 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1159 {
1160 if (dest == s->next_pc) {
1161 return NO_EXIT;
1162 }
1163 if (use_goto_tb(s, dest)) {
1164 update_cc_op(s);
1165 tcg_gen_goto_tb(0);
1166 tcg_gen_movi_i64(psw_addr, dest);
1167 tcg_gen_exit_tb((uintptr_t)s->tb);
1168 return EXIT_GOTO_TB;
1169 } else {
1170 tcg_gen_movi_i64(psw_addr, dest);
1171 return EXIT_PC_UPDATED;
1172 }
1173 }
1174
/* Generate code for a conditional branch.  C describes the condition
   (as produced by e.g. disas_jcc); IS_IMM selects between a relative
   immediate target IMM (counted in halfwords from the insn address)
   and a register target CDEST.  C is freed before returning.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    /* Relative branch offsets are in halfwords.  */
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            /* Taken branch: for the register form, psw_addr was already
               set to cdest above.  */
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so movcond can use it.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1299
1300 /* ====================================================================== */
1301 /* The operations. These perform the bulk of the work for any insn,
1302 usually after the operands have been loaded and output initialized. */
1303
/* Absolute value of a 64-bit integer, via helper.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Absolute value of a 32-bit float: clear the sign bit.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Absolute value of a 64-bit float: clear the sign bit.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* Absolute value of a 128-bit float: clear the sign bit in the high
   doubleword; the low doubleword is copied unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* 64-bit integer addition.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1334
/* Add with carry: 64-bit addition plus the carry bit derived from the
   current condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* Compute the 32-bit condition, then widen it to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    /* Fold the carry into the sum.  */
    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
1361
/* 32-bit BFP addition, in the helper (which also handles FP status).  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP addition, in the helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP addition; the low half of the result comes back through
   the low-128 return slot.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* 64-bit bitwise AND.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1386
/* AND of a shifted immediate field into in1.  INSN->data packs the
   field position (low byte) and width in bits (upper bits).  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Move the immediate into position and fill all bits outside the
       field with ones, so the AND leaves those bits of in1 intact.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1403
/* Branch-and-save, register form: store the link information, then
   branch to the register target if one was supplied.  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        /* No target register supplied: link only, no branch.  */
        return NO_EXIT;
    }
}

/* Branch-and-save, relative form: link, then branch by the halfword
   offset in I2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* Branch on condition, immediate or register target form.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1431
/* Branch on count, 32-bit: decrement the low 32 bits of R1 and branch
   if the decremented value is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    /* Branch taken while (r1 - 1) != 0, compared as 32 bits.  */
    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write back only the low 32 bits of R1.  */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1455
/* Branch on count, 64-bit: decrement R1 in place and branch while the
   result is non-zero.  R1 is a global register temp, hence g1.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1474
/* Branch on index, 32-bit: add R3 to R1 and compare the sum with the
   limit in R3|1.  INSN->data selects the <= (vs >) comparison.  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    /* The comparand lives in the odd register of the R3 pair.  */
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    /* Write the new index back to the low 32 bits of R1.  */
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1500
/* Branch on index, 64-bit: add R3 to R1 and compare the sum with the
   limit in R3|1.  INSN->data selects the <= (vs >) comparison.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If R1 is the same register as the limit (R3|1), the addition
       below would clobber the comparand, so take a private copy.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1526
/* Compare and branch: compare in1 with in2 under the relation selected
   by M3, branching either relative (I4) or to a computed address.
   INSN->data distinguishes the logical (unsigned) forms.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        /* Register form: the branch target is base+displacement.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1552
/* 32-bit BFP compare; the helper computes the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* 64-bit BFP compare; the helper computes the CC directly.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* 128-bit BFP compare; the helper computes the CC directly.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1573
/* Float-to-integer conversions.  M3 carries the rounding mode for the
   helper; the CC is then computed from the *source* float value.  */

/* 32-bit BFP -> 32-bit signed int.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP -> 32-bit signed int.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP -> 32-bit signed int.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* 32-bit BFP -> 64-bit signed int.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP -> 64-bit signed int.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP -> 64-bit signed int.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* 32-bit BFP -> 32-bit unsigned int.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP -> 32-bit unsigned int.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP -> 32-bit unsigned int.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* 32-bit BFP -> 64-bit unsigned int.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP -> 64-bit unsigned int.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP -> 64-bit unsigned int.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1681
/* Integer-to-float conversions.  M3 carries the rounding mode for the
   helper; no CC is set by these.  */

/* 64-bit signed int -> 32-bit BFP.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* 64-bit signed int -> 64-bit BFP.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* 64-bit signed int -> 128-bit BFP; low half returned via low-128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

/* 64-bit unsigned int -> 32-bit BFP.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* 64-bit unsigned int -> 64-bit BFP.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* 64-bit unsigned int -> 128-bit BFP; low half returned via low-128.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1731
/* CHECKSUM: the helper computes the checksum and the number of bytes
   it consumed; R2/R2+1 are then advanced past the consumed bytes.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the address and shrink the remaining length by the
       number of bytes the helper processed.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1748
/* COMPARE LOGICAL (character): for power-of-two lengths up to 8 the
   comparison is inlined as a pair of loads plus an unsigned-compare CC;
   all other lengths go through the byte-wise helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    /* The L field encodes length minus one.  */
    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: let the helper do the byte loop.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1782
1783 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1784 {
1785 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1786 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1787 potential_page_fault(s);
1788 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1789 tcg_temp_free_i32(r1);
1790 tcg_temp_free_i32(r3);
1791 set_cc_static(s);
1792 return NO_EXIT;
1793 }
1794
/* COMPARE LOGICAL CHARACTERS UNDER MASK: the helper compares the bytes
   of the (truncated 32-bit) first operand selected by M3 against
   storage, and produces the CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    /* The helper wants the low 32 bits of in1.  */
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
1807
/* COMPARE LOGICAL STRING: the helper receives regs[0] plus both operand
   addresses, sets the CC, and returns the updated address pair.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1816
1817 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1818 {
1819 TCGv_i64 t = tcg_temp_new_i64();
1820 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1821 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1822 tcg_gen_or_i64(o->out, o->out, t);
1823 tcg_temp_free_i64(t);
1824 return NO_EXIT;
1825 }
1826
/* COMPARE AND SWAP (32- or 64-bit per INSN->data).  Implemented as a
   non-atomic load/compare/conditional-store sequence.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1876
/* COMPARE DOUBLE AND SWAP (128-bit): non-atomic load/compare/store of a
   quadword built from two doubleword accesses.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  CC ends up as 0/1
       for equal/unequal, matching the architectural result.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* Select what to store back: the new value on equality, otherwise
       re-store the original memory contents.  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions. */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1928
1929 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged); done in the helper.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
1939 #endif
1940
/* CONVERT TO DECIMAL: the helper converts the low 32 bits of in1; the
   8-byte result is stored at the operand-2 address.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1952
/* COMPARE AND TRAP: raise a data exception if the relation selected by
   M3 holds between in1 and in2.  The generated branch skips the trap
   when the *inverted* condition is true.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int lab = gen_new_label();
    TCGv_i32 t;
    TCGCond c;

    /* Branch over the trap when the condition does NOT hold.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    /* Trap. */
    gen_program_exception(s, PGM_DATA);

    gen_set_label(lab);
    return NO_EXIT;
}
1979
1980 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): dispatch on the function code to the helper.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
1994 #endif
1995
/* Integer and BFP division.  For the integer forms, the helper produces
   both halves of the quotient/remainder pair; the second half comes
   back through the low-128 return mechanism.  */

/* 32-bit signed divide.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 32-bit unsigned divide.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 64-bit signed divide.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 128-by-64-bit unsigned divide; the dividend is the out:out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 32-bit BFP divide.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 64-bit BFP divide.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit BFP divide; low half of the result via low-128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2042
/* EXTRACT ACCESS REGISTER: read access register R2 into the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT FPC: read the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2055
/* EXECUTE: run the target instruction, modified by in1, via helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    /* The helper may fault or branch: make the PSW and CC current.  */
    update_psw_addr(s);
    update_cc_op(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2081
/* FIND LEFTMOST ONE: R1 = leading-zero count (64 if none), R1+1 = the
   input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2101
2102 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2103 {
2104 int m3 = get_field(s->fields, m3);
2105 int pos, len, base = s->insn->data;
2106 TCGv_i64 tmp = tcg_temp_new_i64();
2107 uint64_t ccm;
2108
2109 switch (m3) {
2110 case 0xf:
2111 /* Effectively a 32-bit load. */
2112 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2113 len = 32;
2114 goto one_insert;
2115
2116 case 0xc:
2117 case 0x6:
2118 case 0x3:
2119 /* Effectively a 16-bit load. */
2120 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2121 len = 16;
2122 goto one_insert;
2123
2124 case 0x8:
2125 case 0x4:
2126 case 0x2:
2127 case 0x1:
2128 /* Effectively an 8-bit load. */
2129 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2130 len = 8;
2131 goto one_insert;
2132
2133 one_insert:
2134 pos = base + ctz32(m3) * 8;
2135 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2136 ccm = ((1ull << len) - 1) << pos;
2137 break;
2138
2139 default:
2140 /* This is going to be a sequence of loads and inserts. */
2141 pos = base + 32 - 8;
2142 ccm = 0;
2143 while (m3) {
2144 if (m3 & 0x8) {
2145 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2146 tcg_gen_addi_i64(o->in2, o->in2, 1);
2147 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2148 ccm |= 0xff << pos;
2149 }
2150 m3 = (m3 << 1) & 0xf;
2151 pos -= 8;
2152 }
2153 break;
2154 }
2155
2156 tcg_gen_movi_i64(tmp, ccm);
2157 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2158 tcg_temp_free_i64(tmp);
2159 return NO_EXIT;
2160 }
2161
/* Insert an immediate field: deposit in2 into in1 at the bit position
   (low byte of INSN->data) and width (upper bits) specified.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2169
/* INSERT PROGRAM MASK: build the CC and program-mask byte of the
   output from the current condition code and the PSW mask.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    /* Materialize the condition code into cc_op.  */
    gen_op_calc_cc(s);
    /* Clear the destination byte that receives CC and program mask.  */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program-mask field from psw_mask into position.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Merge the condition code in at bit 28.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2188
2189 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged); done in the helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED (privileged); done in the helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2203 #endif
2204
/* BFP format conversions ("load lengthened"/"load rounded") and the
   basic memory loads.  */

/* Lengthen 32-bit BFP to 64-bit.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Round 64-bit BFP to 32-bit.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Round 128-bit BFP to 64-bit.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Round 128-bit BFP to 32-bit.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Lengthen 64-bit BFP to 128-bit; low half via low-128.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Lengthen 32-bit BFP to 128-bit; low half via low-128.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Load logical thirty-one bits: keep only the low 31 bits.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

/* Sign-extending 8-bit load.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Zero-extending 8-bit load.  */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Sign-extending 16-bit load.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Zero-extending 16-bit load.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Sign-extending 32-bit load.  */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Zero-extending 32-bit load.  */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* 64-bit load.  */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2290
/* LOAD ON CONDITION: out = condition(M3) ? in2 : in1.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize the condition as 0/1 and
           widen it so movcond can select on it.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2320
2321 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 from memory at
   in2.  Privileged; may fault on the memory access.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit): as op_lctl but with doubleword elements. */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD REAL ADDRESS: translate the virtual address in in2; the helper
   sets the condition code, hence set_cc_static.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2353
/* LOAD PSW: load a short (64-bit) PSW from memory and install it.
   The two 32-bit halves are loaded separately; o->in2 is advanced in
   place for the second load.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    /* NOTE(review): only the mask half is repositioned here; any further
       validation/translation of the short-PSW format is presumably done
       by helper_load_psw — confirm against the helper.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    /* The PSW (and thus the PC) changed: stop translating this block. */
    return EXIT_NORETURN;
}
2372
/* LOAD PSW EXTENDED: load a full 128-bit PSW (mask + address doublewords)
   from memory and install it.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    /* Control flow resumes at the new PSW address.  */
    return EXIT_NORETURN;
}
2389 #endif
2390
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory at in2. */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2401
2402 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2403 {
2404 int r1 = get_field(s->fields, r1);
2405 int r3 = get_field(s->fields, r3);
2406 TCGv_i64 t = tcg_temp_new_i64();
2407 TCGv_i64 t4 = tcg_const_i64(4);
2408
2409 while (1) {
2410 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2411 store_reg32_i64(r1, t);
2412 if (r1 == r3) {
2413 break;
2414 }
2415 tcg_gen_add_i64(o->in2, o->in2, t4);
2416 r1 = (r1 + 1) & 15;
2417 }
2418
2419 tcg_temp_free_i64(t);
2420 tcg_temp_free_i64(t4);
2421 return NO_EXIT;
2422 }
2423
/* LOAD MULTIPLE HIGH: load consecutive words at in2 into the HIGH halves
   of r1..r3 (store_reg32h_i64 deposits into bits 32-63), wrapping from
   r15 back to r0.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}
2445
2446 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2447 {
2448 int r1 = get_field(s->fields, r1);
2449 int r3 = get_field(s->fields, r3);
2450 TCGv_i64 t8 = tcg_const_i64(8);
2451
2452 while (1) {
2453 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2454 if (r1 == r3) {
2455 break;
2456 }
2457 tcg_gen_add_i64(o->in2, o->in2, t8);
2458 r1 = (r1 + 1) & 15;
2459 }
2460
2461 tcg_temp_free_i64(t8);
2462 return NO_EXIT;
2463 }
2464
/* Generic register move: steal in2 as the output, transferring ownership
   of the temporary (and its "global" flag) so it is not freed twice.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* 128-bit move: steal the in1/in2 pair as the out/out2 pair, again
   transferring temporary ownership.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2485
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via helper. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: helper operates on the r1/r2 register pairs and returns
   the condition code.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as MVCL, with the pad byte address in in2. */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2518
2519 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-address-space copy; the key/length
   operand is the register selected by the l1 field.  Helper returns CC. */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: mirror image of MVCP. */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2539 #endif
2540
/* MOVE PAGE: copy one page; r0 carries the operand-access controls.
   NOTE(review): set_cc_static assumes the CC has been placed in the
   cc_op global, but the helper is not passed cc_op here — confirm that
   helper_mvpg updates env->cc_op itself.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: copy until the terminator byte in r0; the helper returns
   the updated first-operand address, and the updated second-operand
   address comes back through retxl.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2557
/* Integer multiply, low 64 bits only. */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Unsigned 64x64->128 multiply: high half to out, low half to out2. */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiply, short operands. */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiply, short operands producing a long result. */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiply, long operands. */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiply, extended operands (128-bit pairs in out/out2, in1/in2). */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* BFP multiply, long operand widened to an extended result. */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* BFP multiply-and-add, short: out = in1 * in2 + f(r3). */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* BFP multiply-and-add, long. */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* BFP multiply-and-subtract, short. */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* BFP multiply-and-subtract, long. */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2631
/* LOAD NEGATIVE (integer): out = -|in2|. */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD NEGATIVE, short BFP: force the sign bit on (float32 sits in the
   low 32 bits of the i64 operand here).  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE, long BFP: force the sign bit on. */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE, extended BFP: sign lives in the high doubleword. */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2656
/* AND (character): byte-wise AND of l1+1 bytes at addr1 with in2;
   helper returns the condition code.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
2666
/* LOAD COMPLEMENT (integer): out = -in2. */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT, short BFP: flip the sign bit. */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT, long BFP: flip the sign bit. */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT, extended BFP: sign lives in the high doubleword. */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2691
/* OR (character): byte-wise OR of l1+1 bytes at addr1 with in2;
   helper returns the condition code.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* Logical OR of two 64-bit operands. */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2707
/* OR IMMEDIATE (OIHH et al.): insn->data packs the field width (high
   byte) and shift (low byte) of the immediate within the register.
   CC is computed from only the affected bits.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is a freshly loaded immediate, safe to clobber in place.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2723
/* POPULATION COUNT: per-byte bit counts, computed by helper. */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
2729
2730 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; flushes the translation lookaside buffer. */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2737 #endif
2738
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate R2 left
   by i5, then insert bits i3..i4 of the result into R1, optionally
   zeroing the rest (i4 bit 0x80).  The high/low variants restrict the
   operation to one 32-bit half via PMASK.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound.  */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts.  */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            /* Rebase RISBHG's half-register bit numbers to the full reg. */
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        /* General case: mask-and-merge after a plain rotate.  */
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
2821
/* ROTATE THEN <AND|OR|XOR> SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate R2
   left by i5, combine bits i3..i4 with R1, and set CC from the selected
   bits.  i3 bit 0x80 makes it test-only (result discarded).  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside the mask are made neutral for the chosen
       operation (1 for AND, 0 for OR/XOR) so they leave R1 unchanged.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2874
/* Byte-swap the low 16 bits of in2. */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap the low 32 bits of in2. */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap all 64 bits of in2. */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2892
2893 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2894 {
2895 TCGv_i32 t1 = tcg_temp_new_i32();
2896 TCGv_i32 t2 = tcg_temp_new_i32();
2897 TCGv_i32 to = tcg_temp_new_i32();
2898 tcg_gen_trunc_i64_i32(t1, o->in1);
2899 tcg_gen_trunc_i64_i32(t2, o->in2);
2900 tcg_gen_rotl_i32(to, t1, t2);
2901 tcg_gen_extu_i32_i64(o->out, to);
2902 tcg_temp_free_i32(t1);
2903 tcg_temp_free_i32(t2);
2904 tcg_temp_free_i32(to);
2905 return NO_EXIT;
2906 }
2907
/* ROTATE LEFT SINGLE LOGICAL (64-bit). */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
2913
2914 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper returns the CC. */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL (FAST): privileged here. */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return EXIT_PC_STALE;
}
2930 #endif
2931
/* SET ACCESS: copy the low 32 bits of in2 into access register r1. */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
2938
/* BFP subtract, short operands. */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP subtract, long operands. */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP subtract, extended operands (128-bit pairs). */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* BFP square root, short. */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* BFP square root, long. */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* BFP square root, extended. */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2976
2977 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; helper returns the CC. */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged; CC left in cc_op by the helper but not
   marked static here — the caller pattern for this insn sets it.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2996 #endif
2997
/* STORE ON CONDITION: if the CC matches the m3 mask, store r1 (32- or
   64-bit, per insn->data) at the b2/d2 address; otherwise branch past
   the store.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    int lab, r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* Note that disas_jcc gives the condition for jumping AROUND the
       store, so the branch is taken when the store must be skipped.  */
    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
3026
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit index
   (31 or 63).  CC reflects overflow; the sign bit itself is preserved
   from the source.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
3040
/* SHIFT LEFT SINGLE LOGICAL. */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic). */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL. */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3058
/* SET FPC: install in2 as the floating-point control register. */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: as SFPC, but may raise a simulated IEEE trap. */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3070
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT): deposit the mode bits from the
   b2/d2 operand into the appropriate field of the FPC, then reinstall
   the FPC so fpu_status picks up the new rounding mode.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    /* Each variant targets a different bit-field of the FPC.  */
    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        /* No base register: the displacement is the literal mode value. */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3110
3111 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; bits 56-59 of in2 become the
   PSW key field.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged. */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace the top byte of the PSW mask. */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
3133
/* STORE CPU ADDRESS: privileged; currently reuses cpu_num. */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested.  */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
3143
/* STORE CLOCK: read the TOD clock into out; CC 0 (clock set). */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: store the TOD clock as a 128-bit value at in2,
   with the 64-bit clock positioned as the middle of the 104-bit
   extended format.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
3173
/* SET CLOCK COMPARATOR: privileged. */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged. */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
3187
/* STORE CONTROL (64-bit): store control registers r1..r3 at in2. */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit). */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3211
/* STORE CPU ID: privileged; currently just the cpu number. */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* SET CPU TIMER: privileged. */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
3225
/* STORE FACILITY LIST: privileged; stores a fixed facility word at the
   architected low-core location 200.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented.  */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}

/* STORE CPU TIMER: privileged. */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
3246
/* STORE SYSTEM INFORMATION: privileged; function code in r0, selectors
   in r1; helper returns the CC.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX: privileged. */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* Channel-subsystem instructions are not implemented: report CC 3. */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational.  */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}
3270
/* STORE PREFIX: privileged; the prefix is 8K-aligned within 31 bits. */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
3278
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac / STOSM): store the
   current system-mask byte at addr1, then AND or OR the immediate into
   the top byte of the PSW mask.  Privileged.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
3302
/* STORE USING REAL ADDRESS: privileged; store in1 at real address in2. */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3310 #endif
3311
/* Memory stores of the various widths: value in in1, address in in2. */

static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
3335
/* STORE ACCESS MULTIPLE: store access registers r1..r3 at in2. */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3346
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by m3 at
   consecutive addresses from in2.  insn->data is the bit offset of the
   32-bit field within the register (0 for STCMH, 32 for STCM/STCMY).
   Contiguous masks become a single wider store.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte within the register.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3395
3396 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3397 {
3398 int r1 = get_field(s->fields, r1);
3399 int r3 = get_field(s->fields, r3);
3400 int size = s->insn->data;
3401 TCGv_i64 tsize = tcg_const_i64(size);
3402
3403 while (1) {
3404 if (size == 8) {
3405 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3406 } else {
3407 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3408 }
3409 if (r1 == r3) {
3410 break;
3411 }
3412 tcg_gen_add_i64(o->in2, o->in2, tsize);
3413 r1 = (r1 + 1) & 15;
3414 }
3415
3416 tcg_temp_free_i64(tsize);
3417 return NO_EXIT;
3418 }
3419
3420 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3421 {
3422 int r1 = get_field(s->fields, r1);
3423 int r3 = get_field(s->fields, r3);
3424 TCGv_i64 t = tcg_temp_new_i64();
3425 TCGv_i64 t4 = tcg_const_i64(4);
3426 TCGv_i64 t32 = tcg_const_i64(32);
3427
3428 while (1) {
3429 tcg_gen_shl_i64(t, regs[r1], t32);
3430 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3431 if (r1 == r3) {
3432 break;
3433 }
3434 tcg_gen_add_i64(o->in2, o->in2, t4);
3435 r1 = (r1 + 1) & 15;
3436 }
3437
3438 tcg_temp_free_i64(t);
3439 tcg_temp_free_i64(t4);
3440 tcg_temp_free_i64(t32);
3441 return NO_EXIT;
3442 }
3443
/* SEARCH STRING: scan for the byte in r0; helper returns the updated
   first-operand address, the second comes back via retxl; CC set.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* Integer subtract. */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3458
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where the borrow is
   derived from the previous condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
3484
/* SUPERVISOR CALL: record the SVC interruption code and instruction
   length in the CPU state, then raise EXCP_SVC.  Ends the TB.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The PSW address and cc_op must be up to date before taking the
       exception.  */
    update_psw_addr(s);
    update_cc_op(s);

    /* Low byte of the immediate is the SVC interruption code.  */
    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    /* Record the instruction length so the handler can advance the PSW.  */
    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3503
/* TEST DATA CLASS, short BFP: the helper computes the CC from the value
   in in1 and the class-test mask taken from the second-operand address.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS, long BFP.  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS, extended BFP.  The 128-bit value arrives through the
   out/out2 pair (see in1_x1_o, which fills those fields).  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3524
#ifndef CONFIG_USER_ONLY
/* TEST PROTECTION (system emulation only): the helper probes the page
   at addr1 and yields the CC.  The second argument is the second-operand
   value — presumably the access key; confirm against helper_tprot.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
3534
/* TRANSLATE: the helper replaces each of the l1+1 bytes at addr1 via
   the translation table addressed by in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    /* NOTE(review): TR does not set the condition code architecturally,
       and the helper returns no CC here, yet set_cc_static marks the CC
       as already computed — confirm helper_tr writes env->cc_op, else
       this latches a stale value.  */
    set_cc_static(s);
    return NO_EXIT;
}
3544
/* UNPACK: the helper converts the packed second operand into zoned
   format at addr1; l1 is the first-operand length code.  CC unchanged.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
3553
/* EXCLUSIVE OR (character): xor two memory operands of l1+1 bytes,
   result replacing the first operand; CC reflects whether the result
   is zero.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;  /* l1 encodes length - 1 */
        /* Emit inline zero stores, widest pieces first; only bump the
           address when more bytes remain.  */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* x ^ x == 0, so the result is always zero: CC 0.  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
3607
/* Exclusive or: out = in1 ^ in2.  CC handled by the cout hook.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* XOR IMMEDIATE into a sub-field of in1.  insn->data packs
   (field size << 8) | shift; the immediate in in2 is moved into
   position before the xor.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* Produce a constant-zero result.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a 128-bit zero result.  out2 aliases out; g_out2 is set so
   the shared temp is not treated as two separate values — presumably
   this prevents a double free in the generic cleanup (confirm there).  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3643
3644 /* ====================================================================== */
3645 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3646 the original inputs), update the various cc data structures in order to
3647 be able to compute the new condition code. */
3648
/* CC from an absolute-value result.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* CC from a signed add: needs both inputs and the result.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* CC from an unsigned (logical) add.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* CC from an add-with-carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* CC from a signed compare of the two inputs.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* CC from an unsigned compare of the two inputs.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* CC from a floating-point result, by format width.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

/* 128-bit float: both result halves participate.  */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* CC from a negative-absolute-value result.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* CC from a complement (negation) result.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* CC = zero/nonzero.  The 32-bit form must truncate first so that
   stale high bits of the result do not affect the test.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* CC from the sign of the result (compare against zero).  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* CC from a signed subtract.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* CC from an unsigned (logical) subtract.  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* CC from a subtract-with-borrow.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* CC for TEST UNDER MASK: value in in1, mask in in2.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3804
3805 /* ====================================================================== */
3806 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3807 with the TCG register to which we will write. Used in combination with
3808 the "wout" generators, in some cases we need a new temporary, and in
3809 some cases we can write to a TCG global. */
3810
/* Allocate a fresh temporary for a single 64-bit result.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh temporary pair for a 128-bit result.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Let the op write directly into the r1 register; g_out marks the
   destination as a TCG global rather than a private temp.  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd general register pair r1/r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into the f1 floating-point register.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write directly into the f1/f1+2 pair holding a 128-bit float.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
3855
3856 /* ====================================================================== */
3857 /* The "Write OUTput" generators. These generally perform some non-trivial
3858 copy of data to TCG globals, or to main memory. The trivial cases are
3859 generally handled by having a "prep" generator install the TCG global
3860 as the destination of the operation. */
3861
/* Full 64-bit result into general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Low 32 bits of the result into r1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Two 32-bit results into the even/odd pair r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split one 64-bit result across the pair: low half to r1+1, high half
   to r1.  Note this clobbers o->out with the shifted value.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Short (32-bit) float result into f1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Long (64-bit) float result into f1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
3916
3917 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3918 {
3919 int f1 = get_field(s->fields, r1);
3920 store_freg(f1, o->out);
3921 store_freg(f1 + 2, o->out2);
3922 }
3923 #define SPEC_wout_x1 SPEC_r1_f128
3924
/* Conditionally store a 32-bit result: skip the write when r1 == r2,
   in which case the value is already in place.  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Likewise for a short float result.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the result to memory at the addr1 effective address, in
   various widths.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store the result to memory at the in2 address.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0
3970
3971 /* ====================================================================== */
3972 /* The "INput 1" generators. These load the first operand to an insn. */
3973
/* Copy of general register r1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* "_o" variants alias the TCG global itself instead of copying;
   g_in1 records that in1 is a global, not a private temp.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Low 32 bits of r1, sign-extended.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Low 32 bits of r1, zero-extended.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down into the low half.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd register of the even/odd pair r1/r1+1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* The r1/r1+1 pair as one 64-bit value: r1 supplies the high half,
   r1+1 the low half.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

/* Copy of general register r2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

/* Copy of general register r3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* The r3/r3+1 pair as one 64-bit value, r3 in the high half.  */
static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* Short float from f1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* Long float f1 aliased in place.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* Note: deliberately fills the *output* pair, not in1.  128-bit FP
   operands reach the op handlers through out/out2 (see op_tcxb, which
   reads o->out/o->out2 for its x1 input).  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

/* Long float f3 aliased in place.  */
static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Only compute the first-operand effective address into addr1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Second-operand effective address (optionally indexed) into addr1.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Memory loads from the first-operand address, in various widths
   and signednesses.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4166
4167 /* ====================================================================== */
4168 /* The "INput 2" generators. These load the second operand to an insn. */
4169
/* Register r1 aliased in place as the second operand.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Low 16 bits of r1, zero-extended.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Low 32 bits of r1, zero-extended.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* The r1/r1+1 pair as one 64-bit value, r1 in the high half.  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

/* Copy of general register r2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* Register r2 aliased in place.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Copy of r2, but leave in2 NULL when r2 is 0 (register 0 means
   "no operand" for these insns).  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Sub-width extracts of r2, signed and unsigned.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

/* Copy of general register r3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* Short float from f2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Long float f2 aliased in place.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* A 128-bit x2 float operand occupies both input slots: high half in
   in1, low half in in2, both aliasing the register pair.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128
4290
/* Register r2 interpreted as an address (wrapped per addressing mode).  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Second-operand effective address, optionally with an index register.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: pc + 2 * signed i2 (i2 counts halfwords).  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift-count operand, limited to 31 or 63 bits by help_l2_shift.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

/* Memory loads from the second-operand address; the address in in2 is
   overwritten by the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* Memory loads from a PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Immediate operands: the raw (sign-extended) i2 field, or zero-extended
   sub-widths of it.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* Zero-extended immediate, pre-shifted left by insn->data bits.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4429
4430 /* ====================================================================== */
4431
4432 /* Find opc within the table of insns. This is formulated as a switch
4433 statement so that (1) we get compile-time notice of cut-paste errors
4434 for duplicated opcodes, and (2) the compiler generates the binary
4435 search tree, rather than us having to post-process the table. */
4436
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: one enumerator per insn name, so
   a duplicated mnemonic is a compile-time error.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: the DisasInsn descriptor for each insn, wiring up
   the in1/in2/prep/wout/cout/op generator functions by name.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
4478
#undef D
/* Third expansion: one switch case per opcode, returning the matching
   table entry; the compiler turns the switch into a binary search.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a 16-bit (major << 8 | minor) opcode to its DisasInsn, or NULL
   if the opcode is not implemented.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
4494
4495 /* Extract a field from the insn. The INSN should be left-aligned in
4496 the uint64_t so that we can more easily utilize the big-bit-endian
4497 definitions we extract from the Principals of Operation. */
4498
4499 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4500 {
4501 uint32_t r, m;
4502
4503 if (f->size == 0) {
4504 return;
4505 }
4506
4507 /* Zero extract the field from the insn. */
4508 r = (insn << f->beg) >> (64 - f->size);
4509
4510 /* Sign-extend, or un-swap the field as necessary. */
4511 switch (f->type) {
4512 case 0: /* unsigned */
4513 break;
4514 case 1: /* signed */
4515 assert(f->size <= 32);
4516 m = 1u << (f->size - 1);
4517 r = (r ^ m) - m;
4518 break;
4519 case 2: /* dl+dh split, signed 20 bit. */
4520 r = ((int8_t)r << 12) | (r >> 8);
4521 break;
4522 default:
4523 abort();
4524 }
4525
4526 /* Validate that the "compressed" encoding we selected above is valid.
4527 I.e. we havn't make two different original fields overlap. */
4528 assert(((o->presentC >> f->indexC) & 1) == 0);
4529 o->presentC |= 1 << f->indexC;
4530 o->presentO |= 1 << f->indexO;
4531
4532 o->c[f->indexC] = r;
4533 }
4534
4535 /* Lookup the insn at the current PC, extracting the operands into O and
4536 returning the info struct for the insn. Returns NULL for invalid insn. */
4537
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes give the major opcode, which determines the
       instruction length (2, 4 or 6 bytes).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the whole instruction in the 64-bit word, as required
       by extract_field.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        /* Default: secondary opcode at bit 40 (the sixth byte).  */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4623
/* Decode and translate the single instruction at s->pc, emitting TCG
   ops for it, and advance s->pc past it.  Returns the exit status the
   main translation loop uses to decide whether to continue.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Register-pair operands must name the even register.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* 128-bit FP operands: registers above 13 can never begin a
           valid FP register pair (coarse check -- presumably the full
           pairing rule is enforced elsewhere; confirm against PoO).  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; the stages
       run in a fixed order: load inputs, prepare outputs, perform the
       operation, write outputs, compute condition code.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       values that alias globals and therefore must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4733
/* Translate a block of guest instructions starting at tb->pc into TCG
   ops.  If SEARCH_PC is set, also record per-op guest PC and cc_op
   metadata so guest state can be reconstructed at a faulting insn
   (see restore_state_to_opc).  */
static inline void gen_intermediate_code_internal(S390CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUS390XState *env = &cpu->env;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Translation must stop at the guest page boundary.  */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();

    do {
        if (search_pc) {
            /* Record the guest PC, cc_op and icount for the first TCG
               op of this insn; pad any skipped op slots with zero.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this PC ends the TB before translating the
           insn, so the debug exception is raised at the right place.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return. */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the metadata slots for ops past the last insn.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4869
/* Translate a TB without recording per-op PC search metadata.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}
4874
/* Translate a TB, recording per-op PC metadata for state restoration.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}
4879
4880 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4881 {
4882 int cc_op;
4883 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4884 cc_op = gen_opc_cc_op[pc_pos];
4885 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4886 env->cc_op = cc_op;
4887 }
4888 }