git.proxmox.com Git - mirror_qemu.git/blob - target-s390x/translate.c
target-s390: Convert CSP
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
50
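/* Roughly: pc is the address of the instruction being translated and
   next_pc the address of the one following it (their difference is the
   instruction length, see gen_program_exception); cc_op records how the
   condition code will be computed lazily (see gen_op_calc_cc); is_jmp
   tells the translation loop how the TB ends. */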
51 struct DisasContext {
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
54 DisasFields *fields;
55 uint64_t pc, next_pc;
56 enum cc_op cc_op;
57 bool singlestep_enabled;
58 int is_jmp;
59 };
60
61 /* Information carried about a condition to be evaluated. */
62 typedef struct {
63 TCGCond cond:8;
64 bool is_64;
65 bool g1;
66 bool g2;
67 union {
68 struct { TCGv_i64 a, b; } s64;
69 struct { TCGv_i32 a, b; } s32;
70 } u;
71 } DisasCompare;
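/* When g1/g2 are set the corresponding operand is a global TCG value
   (cc_op, cc_src, cc_dst or a guest register) rather than a temporary,
   so free_compare() must not release it. */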
72
73 #define DISAS_EXCP 4
74
75 static void gen_op_calc_cc(DisasContext *s);
76
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
80 #endif
81
82 static inline void debug_insn(uint64_t insn)
83 {
84 LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
85 }
86
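/* Compute the link information used by the branch-and-save style ops
   below (op_bas, op_basi): in 31-bit mode the addressing-mode bit
   (0x80000000) is folded into the return address, otherwise the address
   is used unchanged. */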
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
99 {
100 int i;
101
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
108 }
109
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
116 }
117 }
118
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
125 }
126 }
127
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
135 }
136 }
137 #endif
138
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
143 }
144 #endif
145
146 cpu_fprintf(f, "\n");
147 }
148
149 static TCGv_i64 psw_addr;
150 static TCGv_i64 psw_mask;
151
152 static TCGv_i32 cc_op;
153 static TCGv_i64 cc_src;
154 static TCGv_i64 cc_dst;
155 static TCGv_i64 cc_vr;
156
157 static char cpu_reg_names[32][4];
158 static TCGv_i64 regs[16];
159 static TCGv_i64 fregs[16];
160
161 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
163 void s390x_translate_init(void)
164 {
165 int i;
166
167 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
168 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
169 offsetof(CPUS390XState, psw.addr),
170 "psw_addr");
171 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
172 offsetof(CPUS390XState, psw.mask),
173 "psw_mask");
174
175 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
176 "cc_op");
177 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
178 "cc_src");
179 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
180 "cc_dst");
181 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
182 "cc_vr");
183
184 for (i = 0; i < 16; i++) {
185 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
186 regs[i] = tcg_global_mem_new(TCG_AREG0,
187 offsetof(CPUS390XState, regs[i]),
188 cpu_reg_names[i]);
189 }
190
191 for (i = 0; i < 16; i++) {
192 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
193 fregs[i] = tcg_global_mem_new(TCG_AREG0,
194 offsetof(CPUS390XState, fregs[i].d),
195 cpu_reg_names[i + 16]);
196 }
197
198 /* register helpers */
199 #define GEN_HELPER 2
200 #include "helper.h"
201 }
202
203 static inline TCGv_i64 load_reg(int reg)
204 {
205 TCGv_i64 r = tcg_temp_new_i64();
206 tcg_gen_mov_i64(r, regs[reg]);
207 return r;
208 }
209
210 static inline TCGv_i64 load_freg(int reg)
211 {
212 TCGv_i64 r = tcg_temp_new_i64();
213 tcg_gen_mov_i64(r, fregs[reg]);
214 return r;
215 }
216
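/* Short (32-bit) FP values live in the high half of the 64-bit register
   image, hence the shift/deposit at bit 32 in the 32-bit helpers below
   (and TCGV_HIGH on 32-bit hosts, where that half is directly
   addressable). */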
217 static inline TCGv_i32 load_freg32(int reg)
218 {
219 TCGv_i32 r = tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
222 #else
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
224 #endif
225 return r;
226 }
227
228 static inline TCGv_i64 load_freg32_i64(int reg)
229 {
230 TCGv_i64 r = tcg_temp_new_i64();
231 tcg_gen_shri_i64(r, fregs[reg], 32);
232 return r;
233 }
234
235 static inline TCGv_i32 load_reg32(int reg)
236 {
237 TCGv_i32 r = tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r, regs[reg]);
239 return r;
240 }
241
242 static inline TCGv_i64 load_reg32_i64(int reg)
243 {
244 TCGv_i64 r = tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r, regs[reg]);
246 return r;
247 }
248
249 static inline void store_reg(int reg, TCGv_i64 v)
250 {
251 tcg_gen_mov_i64(regs[reg], v);
252 }
253
254 static inline void store_freg(int reg, TCGv_i64 v)
255 {
256 tcg_gen_mov_i64(fregs[reg], v);
257 }
258
259 static inline void store_reg32(int reg, TCGv_i32 v)
260 {
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
264 #else
265 tcg_gen_deposit_i64(regs[reg], regs[reg],
266 MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
267 #endif
268 }
269
270 static inline void store_reg32_i64(int reg, TCGv_i64 v)
271 {
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
274 }
275
276 static inline void store_reg32h_i64(int reg, TCGv_i64 v)
277 {
278 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
279 }
280
281 static inline void store_freg32(int reg, TCGv_i32 v)
282 {
283 /* 32 bit register writes keep the lower half */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
286 #else
287 tcg_gen_deposit_i64(fregs[reg], fregs[reg],
288 MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
289 #endif
290 }
291
292 static inline void store_freg32_i64(int reg, TCGv_i64 v)
293 {
294 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
295 }
296
297 static inline void return_low128(TCGv_i64 dest)
298 {
299 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
300 }
301
302 static inline void update_psw_addr(DisasContext *s)
303 {
304 /* psw.addr */
305 tcg_gen_movi_i64(psw_addr, s->pc);
306 }
307
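/* Helpers that may fault need the architectural state to be in sync:
   flush the current PC and condition code before the call so the
   exception path sees consistent values (softmmu builds only). */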
308 static inline void potential_page_fault(DisasContext *s)
309 {
310 #ifndef CONFIG_USER_ONLY
311 update_psw_addr(s);
312 gen_op_calc_cc(s);
313 #endif
314 }
315
316 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
317 {
318 return (uint64_t)cpu_lduw_code(env, pc);
319 }
320
321 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
322 {
323 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
324 }
325
326 static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
327 {
328 return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
329 }
330
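/* Map the address-space-control bits of the PSW to an MMU index:
   primary -> 0, secondary -> 1, home -> 2.  The ">> 32" reflects that
   tb->flags appears to hold the high word of psw.mask. */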
331 static inline int get_mem_index(DisasContext *s)
332 {
333 switch (s->tb->flags & FLAG_MASK_ASC) {
334 case PSW_ASC_PRIMARY >> 32:
335 return 0;
336 case PSW_ASC_SECONDARY >> 32:
337 return 1;
338 case PSW_ASC_HOME >> 32:
339 return 2;
340 default:
341 tcg_abort();
342 break;
343 }
344 }
345
346 static void gen_exception(int excp)
347 {
348 TCGv_i32 tmp = tcg_const_i32(excp);
349 gen_helper_exception(cpu_env, tmp);
350 tcg_temp_free_i32(tmp);
351 }
352
353 static void gen_program_exception(DisasContext *s, int code)
354 {
355 TCGv_i32 tmp;
356
357     /* Remember what pgm exception this was.  */
358 tmp = tcg_const_i32(code);
359 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
360 tcg_temp_free_i32(tmp);
361
362 tmp = tcg_const_i32(s->next_pc - s->pc);
363 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
364 tcg_temp_free_i32(tmp);
365
366 /* Advance past instruction. */
367 s->pc = s->next_pc;
368 update_psw_addr(s);
369
370 /* Save off cc. */
371 gen_op_calc_cc(s);
372
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM);
375
376 /* End TB here. */
377 s->is_jmp = DISAS_EXCP;
378 }
379
380 static inline void gen_illegal_opcode(DisasContext *s)
381 {
382 gen_program_exception(s, PGM_SPECIFICATION);
383 }
384
385 static inline void check_privileged(DisasContext *s)
386 {
387 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
388 gen_program_exception(s, PGM_PRIVILEGED);
389 }
390 }
391
392 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
393 {
394 TCGv_i64 tmp;
395
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s->tb->flags & FLAG_MASK_64)) {
398 d2 &= 0x7fffffffUL;
399 }
400
401 if (x2) {
402 if (d2) {
403 tmp = tcg_const_i64(d2);
404 tcg_gen_add_i64(tmp, tmp, regs[x2]);
405 } else {
406 tmp = load_reg(x2);
407 }
408 if (b2) {
409 tcg_gen_add_i64(tmp, tmp, regs[b2]);
410 }
411 } else if (b2) {
412 if (d2) {
413 tmp = tcg_const_i64(d2);
414 tcg_gen_add_i64(tmp, tmp, regs[b2]);
415 } else {
416 tmp = load_reg(b2);
417 }
418 } else {
419 tmp = tcg_const_i64(d2);
420 }
421
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
424 tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
425 }
426
427 return tmp;
428 }
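/* Illustrative sketch (not part of the translator): in 31-bit mode with
   x2 = 0, b2 = 5, d2 = 0x123 the code above reduces to
       tmp = tcg_const_i64(0x123);
       tcg_gen_add_i64(tmp, tmp, regs[5]);
       tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
   i.e. base plus displacement, truncated to 31 bits. */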
429
430 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
431 {
432 s->cc_op = CC_OP_CONST0 + val;
433 }
434
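/* The gen_op_update*_cc_* helpers implement the lazy CC scheme: they
   stash the operands in the cc_src/cc_dst/cc_vr globals and record the
   pending operation in s->cc_op; the real 2-bit condition code is only
   materialized by gen_op_calc_cc() when something needs it. */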
435 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
436 {
437 tcg_gen_discard_i64(cc_src);
438 tcg_gen_mov_i64(cc_dst, dst);
439 tcg_gen_discard_i64(cc_vr);
440 s->cc_op = op;
441 }
442
443 static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
444 {
445 tcg_gen_discard_i64(cc_src);
446 tcg_gen_extu_i32_i64(cc_dst, dst);
447 tcg_gen_discard_i64(cc_vr);
448 s->cc_op = op;
449 }
450
451 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
452 TCGv_i64 dst)
453 {
454 tcg_gen_mov_i64(cc_src, src);
455 tcg_gen_mov_i64(cc_dst, dst);
456 tcg_gen_discard_i64(cc_vr);
457 s->cc_op = op;
458 }
459
460 static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
461 TCGv_i32 dst)
462 {
463 tcg_gen_extu_i32_i64(cc_src, src);
464 tcg_gen_extu_i32_i64(cc_dst, dst);
465 tcg_gen_discard_i64(cc_vr);
466 s->cc_op = op;
467 }
468
469 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
470 TCGv_i64 dst, TCGv_i64 vr)
471 {
472 tcg_gen_mov_i64(cc_src, src);
473 tcg_gen_mov_i64(cc_dst, dst);
474 tcg_gen_mov_i64(cc_vr, vr);
475 s->cc_op = op;
476 }
477
478 static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
479 {
480 gen_op_update1_cc_i32(s, CC_OP_NZ, val);
481 }
482
483 static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
484 {
485 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
486 }
487
488 static inline void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
489 {
490 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
491 }
492
493 static inline void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
494 {
495 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
496 }
497
498 static inline void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
499 {
500 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
501 }
502
503 static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
504 enum cc_op cond)
505 {
506 gen_op_update2_cc_i32(s, cond, v1, v2);
507 }
508
509 static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
510 enum cc_op cond)
511 {
512 gen_op_update2_cc_i64(s, cond, v1, v2);
513 }
514
515 static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
516 {
517 cmp_32(s, v1, v2, CC_OP_LTGT_32);
518 }
519
520 static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
521 {
522 cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
523 }
524
525 static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
526 {
527 /* XXX optimize for the constant? put it in s? */
528 TCGv_i32 tmp = tcg_const_i32(v2);
529 cmp_32(s, v1, tmp, CC_OP_LTGT_32);
530 tcg_temp_free_i32(tmp);
531 }
532
533 static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
534 {
535 TCGv_i32 tmp = tcg_const_i32(v2);
536 cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
537 tcg_temp_free_i32(tmp);
538 }
539
540 static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
541 {
542 cmp_64(s, v1, v2, CC_OP_LTGT_64);
543 }
544
545 static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
546 {
547 cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
548 }
549
550 static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
551 {
552 TCGv_i64 tmp = tcg_const_i64(v2);
553 cmp_s64(s, v1, tmp);
554 tcg_temp_free_i64(tmp);
555 }
556
557 static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
558 {
559 TCGv_i64 tmp = tcg_const_i64(v2);
560 cmp_u64(s, v1, tmp);
561 tcg_temp_free_i64(tmp);
562 }
563
564 static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
565 {
566 gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
567 }
568
569 static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
570 {
571 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
572 }
573
574 /* CC value is in env->cc_op */
575 static inline void set_cc_static(DisasContext *s)
576 {
577 tcg_gen_discard_i64(cc_src);
578 tcg_gen_discard_i64(cc_dst);
579 tcg_gen_discard_i64(cc_vr);
580 s->cc_op = CC_OP_STATIC;
581 }
582
583 static inline void gen_op_set_cc_op(DisasContext *s)
584 {
585 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
586 tcg_gen_movi_i32(cc_op, s->cc_op);
587 }
588 }
589
590 static inline void gen_update_cc_op(DisasContext *s)
591 {
592 gen_op_set_cc_op(s);
593 }
594
595 /* calculates cc into cc_op */
596 static void gen_op_calc_cc(DisasContext *s)
597 {
598 TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
599 TCGv_i64 dummy = tcg_const_i64(0);
600
601 switch (s->cc_op) {
602 case CC_OP_CONST0:
603 case CC_OP_CONST1:
604 case CC_OP_CONST2:
605 case CC_OP_CONST3:
606 /* s->cc_op is the cc value */
607 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
608 break;
609 case CC_OP_STATIC:
610 /* env->cc_op already is the cc value */
611 break;
612 case CC_OP_NZ:
613 case CC_OP_ABS_64:
614 case CC_OP_NABS_64:
615 case CC_OP_ABS_32:
616 case CC_OP_NABS_32:
617 case CC_OP_LTGT0_32:
618 case CC_OP_LTGT0_64:
619 case CC_OP_COMP_32:
620 case CC_OP_COMP_64:
621 case CC_OP_NZ_F32:
622 case CC_OP_NZ_F64:
623 case CC_OP_FLOGR:
624 /* 1 argument */
625 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
626 break;
627 case CC_OP_ICM:
628 case CC_OP_LTGT_32:
629 case CC_OP_LTGT_64:
630 case CC_OP_LTUGTU_32:
631 case CC_OP_LTUGTU_64:
632 case CC_OP_TM_32:
633 case CC_OP_TM_64:
634 case CC_OP_SLA_32:
635 case CC_OP_SLA_64:
636 case CC_OP_NZ_F128:
637 /* 2 arguments */
638 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
639 break;
640 case CC_OP_ADD_64:
641 case CC_OP_ADDU_64:
642 case CC_OP_ADDC_64:
643 case CC_OP_SUB_64:
644 case CC_OP_SUBU_64:
645 case CC_OP_SUBB_64:
646 case CC_OP_ADD_32:
647 case CC_OP_ADDU_32:
648 case CC_OP_ADDC_32:
649 case CC_OP_SUB_32:
650 case CC_OP_SUBU_32:
651 case CC_OP_SUBB_32:
652 /* 3 arguments */
653 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
654 break;
655 case CC_OP_DYNAMIC:
656 /* unknown operation - assume 3 arguments and cc_op in env */
657 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
658 break;
659 default:
660 tcg_abort();
661 }
662
663 tcg_temp_free_i32(local_cc_op);
664 tcg_temp_free_i64(dummy);
665
666 /* We now have cc in cc_op as constant */
667 set_cc_static(s);
668 }
669
670 static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
671 {
672 debug_insn(insn);
673
674 *r1 = (insn >> 4) & 0xf;
675 *r2 = insn & 0xf;
676 }
677
678 static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
679 int *x2, int *b2, int *d2)
680 {
681 debug_insn(insn);
682
683 *r1 = (insn >> 20) & 0xf;
684 *x2 = (insn >> 16) & 0xf;
685 *b2 = (insn >> 12) & 0xf;
686 *d2 = insn & 0xfff;
687
688 return get_address(s, *x2, *b2, *d2);
689 }
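/* Example, for illustration only: the RX instruction L r1,d2(x2,b2)
   encoded as 0x58102003 decodes to r1 = 1, x2 = 0, b2 = 2, d2 = 0x003. */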
690
691 static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
692 int *b2, int *d2)
693 {
694 debug_insn(insn);
695
696 *r1 = (insn >> 20) & 0xf;
697 /* aka m3 */
698 *r3 = (insn >> 16) & 0xf;
699 *b2 = (insn >> 12) & 0xf;
700 *d2 = insn & 0xfff;
701 }
702
703 static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
704 int *b1, int *d1)
705 {
706 debug_insn(insn);
707
708 *i2 = (insn >> 16) & 0xff;
709 *b1 = (insn >> 12) & 0xf;
710 *d1 = insn & 0xfff;
711
712 return get_address(s, 0, *b1, *d1);
713 }
714
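/* Direct block chaining (goto_tb) is only used when the destination
   stays on one of the guest pages the TB may span, we are not
   single-stepping, and the TB does not end with an I/O instruction
   (CF_LAST_IO). */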
715 static int use_goto_tb(DisasContext *s, uint64_t dest)
716 {
717 /* NOTE: we handle the case where the TB spans two pages here */
718 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
719 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
720 && !s->singlestep_enabled
721 && !(s->tb->cflags & CF_LAST_IO));
722 }
723
724 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
725 {
726 gen_update_cc_op(s);
727
728 if (use_goto_tb(s, pc)) {
729 tcg_gen_goto_tb(tb_num);
730 tcg_gen_movi_i64(psw_addr, pc);
731 tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
732 } else {
733 /* jump to another page: currently not optimized */
734 tcg_gen_movi_i64(psw_addr, pc);
735 tcg_gen_exit_tb(0);
736 }
737 }
738
739 static inline void account_noninline_branch(DisasContext *s, int cc_op)
740 {
741 #ifdef DEBUG_INLINE_BRANCHES
742 inline_branch_miss[cc_op]++;
743 #endif
744 }
745
746 static inline void account_inline_branch(DisasContext *s, int cc_op)
747 {
748 #ifdef DEBUG_INLINE_BRANCHES
749 inline_branch_hit[cc_op]++;
750 #endif
751 }
752
753 /* Table of mask values to comparison codes, given a comparison as input.
754 For a true comparison CC=3 will never be set, but we treat this
755 conservatively for possible use when CC=3 indicates overflow. */
756 static const TCGCond ltgt_cond[16] = {
757 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
758 TCG_COND_GT, TCG_COND_NEVER, /* | | GT | x */
759 TCG_COND_LT, TCG_COND_NEVER, /* | LT | | x */
760 TCG_COND_NE, TCG_COND_NEVER, /* | LT | GT | x */
761 TCG_COND_EQ, TCG_COND_NEVER, /* EQ | | | x */
762 TCG_COND_GE, TCG_COND_NEVER, /* EQ | | GT | x */
763 TCG_COND_LE, TCG_COND_NEVER, /* EQ | LT | | x */
764 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
765 };
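/* The index is the 4-bit instruction mask: bit 8 selects CC 0 (equal),
   bit 4 CC 1 (low), bit 2 CC 2 (high), bit 1 CC 3.  E.g. a mask of
   0xa (8 | 2, "branch on not low") yields TCG_COND_GE above. */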
766
767 /* Table of mask values to comparison codes, given a logic op as input.
768 For such, only CC=0 and CC=1 should be possible. */
769 static const TCGCond nz_cond[16] = {
770 /* | | x | x */
771 TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
772 /* | NE | x | x */
773 TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
774 /* EQ | | x | x */
775 TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
776 /* EQ | NE | x | x */
777 TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
778 };
779
780 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
781 details required to generate a TCG comparison. */
782 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
783 {
784 TCGCond cond;
785 enum cc_op old_cc_op = s->cc_op;
786
787 if (mask == 15 || mask == 0) {
788 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
789 c->u.s32.a = cc_op;
790 c->u.s32.b = cc_op;
791 c->g1 = c->g2 = true;
792 c->is_64 = false;
793 return;
794 }
795
796 /* Find the TCG condition for the mask + cc op. */
797 switch (old_cc_op) {
798 case CC_OP_LTGT0_32:
799 case CC_OP_LTGT0_64:
800 case CC_OP_LTGT_32:
801 case CC_OP_LTGT_64:
802 cond = ltgt_cond[mask];
803 if (cond == TCG_COND_NEVER) {
804 goto do_dynamic;
805 }
806 account_inline_branch(s, old_cc_op);
807 break;
808
809 case CC_OP_LTUGTU_32:
810 case CC_OP_LTUGTU_64:
811 cond = tcg_unsigned_cond(ltgt_cond[mask]);
812 if (cond == TCG_COND_NEVER) {
813 goto do_dynamic;
814 }
815 account_inline_branch(s, old_cc_op);
816 break;
817
818 case CC_OP_NZ:
819 cond = nz_cond[mask];
820 if (cond == TCG_COND_NEVER) {
821 goto do_dynamic;
822 }
823 account_inline_branch(s, old_cc_op);
824 break;
825
826 case CC_OP_TM_32:
827 case CC_OP_TM_64:
828 switch (mask) {
829 case 8:
830 cond = TCG_COND_EQ;
831 break;
832 case 4 | 2 | 1:
833 cond = TCG_COND_NE;
834 break;
835 default:
836 goto do_dynamic;
837 }
838 account_inline_branch(s, old_cc_op);
839 break;
840
841 case CC_OP_ICM:
842 switch (mask) {
843 case 8:
844 cond = TCG_COND_EQ;
845 break;
846 case 4 | 2 | 1:
847 case 4 | 2:
848 cond = TCG_COND_NE;
849 break;
850 default:
851 goto do_dynamic;
852 }
853 account_inline_branch(s, old_cc_op);
854 break;
855
856 case CC_OP_FLOGR:
857 switch (mask & 0xa) {
858 case 8: /* src == 0 -> no one bit found */
859 cond = TCG_COND_EQ;
860 break;
861 case 2: /* src != 0 -> one bit found */
862 cond = TCG_COND_NE;
863 break;
864 default:
865 goto do_dynamic;
866 }
867 account_inline_branch(s, old_cc_op);
868 break;
869
870 default:
871 do_dynamic:
872 /* Calculate cc value. */
873 gen_op_calc_cc(s);
874 /* FALLTHRU */
875
876 case CC_OP_STATIC:
877 /* Jump based on CC. We'll load up the real cond below;
878 the assignment here merely avoids a compiler warning. */
879 account_noninline_branch(s, old_cc_op);
880 old_cc_op = CC_OP_STATIC;
881 cond = TCG_COND_NEVER;
882 break;
883 }
884
885 /* Load up the arguments of the comparison. */
886 c->is_64 = true;
887 c->g1 = c->g2 = false;
888 switch (old_cc_op) {
889 case CC_OP_LTGT0_32:
890 c->is_64 = false;
891 c->u.s32.a = tcg_temp_new_i32();
892 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
893 c->u.s32.b = tcg_const_i32(0);
894 break;
895 case CC_OP_LTGT_32:
896 case CC_OP_LTUGTU_32:
897 c->is_64 = false;
898 c->u.s32.a = tcg_temp_new_i32();
899 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
900 c->u.s32.b = tcg_temp_new_i32();
901 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
902 break;
903
904 case CC_OP_LTGT0_64:
905 case CC_OP_NZ:
906 case CC_OP_FLOGR:
907 c->u.s64.a = cc_dst;
908 c->u.s64.b = tcg_const_i64(0);
909 c->g1 = true;
910 break;
911 case CC_OP_LTGT_64:
912 case CC_OP_LTUGTU_64:
913 c->u.s64.a = cc_src;
914 c->u.s64.b = cc_dst;
915 c->g1 = c->g2 = true;
916 break;
917
918 case CC_OP_TM_32:
919 case CC_OP_TM_64:
920 case CC_OP_ICM:
921 c->u.s64.a = tcg_temp_new_i64();
922 c->u.s64.b = tcg_const_i64(0);
923 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
924 break;
925
926 case CC_OP_STATIC:
927 c->is_64 = false;
928 c->u.s32.a = cc_op;
929 c->g1 = true;
930 switch (mask) {
931 case 0x8 | 0x4 | 0x2: /* cc != 3 */
932 cond = TCG_COND_NE;
933 c->u.s32.b = tcg_const_i32(3);
934 break;
935 case 0x8 | 0x4 | 0x1: /* cc != 2 */
936 cond = TCG_COND_NE;
937 c->u.s32.b = tcg_const_i32(2);
938 break;
939 case 0x8 | 0x2 | 0x1: /* cc != 1 */
940 cond = TCG_COND_NE;
941 c->u.s32.b = tcg_const_i32(1);
942 break;
943 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
944 cond = TCG_COND_EQ;
945 c->g1 = false;
946 c->u.s32.a = tcg_temp_new_i32();
947 c->u.s32.b = tcg_const_i32(0);
948 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
949 break;
950 case 0x8 | 0x4: /* cc < 2 */
951 cond = TCG_COND_LTU;
952 c->u.s32.b = tcg_const_i32(2);
953 break;
954 case 0x8: /* cc == 0 */
955 cond = TCG_COND_EQ;
956 c->u.s32.b = tcg_const_i32(0);
957 break;
958 case 0x4 | 0x2 | 0x1: /* cc != 0 */
959 cond = TCG_COND_NE;
960 c->u.s32.b = tcg_const_i32(0);
961 break;
962 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
963 cond = TCG_COND_NE;
964 c->g1 = false;
965 c->u.s32.a = tcg_temp_new_i32();
966 c->u.s32.b = tcg_const_i32(0);
967 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
968 break;
969 case 0x4: /* cc == 1 */
970 cond = TCG_COND_EQ;
971 c->u.s32.b = tcg_const_i32(1);
972 break;
973 case 0x2 | 0x1: /* cc > 1 */
974 cond = TCG_COND_GTU;
975 c->u.s32.b = tcg_const_i32(1);
976 break;
977 case 0x2: /* cc == 2 */
978 cond = TCG_COND_EQ;
979 c->u.s32.b = tcg_const_i32(2);
980 break;
981 case 0x1: /* cc == 3 */
982 cond = TCG_COND_EQ;
983 c->u.s32.b = tcg_const_i32(3);
984 break;
985 default:
986 /* CC is masked by something else: (8 >> cc) & mask. */
987 cond = TCG_COND_NE;
988 c->g1 = false;
989 c->u.s32.a = tcg_const_i32(8);
990 c->u.s32.b = tcg_const_i32(0);
991 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
992 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
993 break;
994 }
995 break;
996
997 default:
998 abort();
999 }
1000 c->cond = cond;
1001 }
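/* Typical use (see op_bc and the branch helpers below): pass the 4-bit
   mask from the instruction, emit a brcond/setcond/movcond from the
   returned DisasCompare, then release it with free_compare(). */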
1002
1003 static void free_compare(DisasCompare *c)
1004 {
1005 if (!c->g1) {
1006 if (c->is_64) {
1007 tcg_temp_free_i64(c->u.s64.a);
1008 } else {
1009 tcg_temp_free_i32(c->u.s32.a);
1010 }
1011 }
1012 if (!c->g2) {
1013 if (c->is_64) {
1014 tcg_temp_free_i64(c->u.s64.b);
1015 } else {
1016 tcg_temp_free_i32(c->u.s32.b);
1017 }
1018 }
1019 }
1020
1021 static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
1022 uint32_t insn)
1023 {
1024 #ifndef CONFIG_USER_ONLY
1025 TCGv_i64 tmp, tmp2, tmp3;
1026 TCGv_i32 tmp32_1, tmp32_2;
1027 int r1, r2;
1028 int r3, d2, b2;
1029
1030 r1 = (insn >> 4) & 0xf;
1031 r2 = insn & 0xf;
1032
1033 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1034
1035 switch (op) {
1036 case 0x78: /* STCKE D2(B2) [S] */
1037 /* Store Clock Extended */
1038 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1039 tmp = get_address(s, 0, b2, d2);
1040 potential_page_fault(s);
1041 gen_helper_stcke(cc_op, cpu_env, tmp);
1042 set_cc_static(s);
1043 tcg_temp_free_i64(tmp);
1044 break;
1045 case 0x79: /* SACF D2(B2) [S] */
1046 /* Set Address Space Control Fast */
1047 check_privileged(s);
1048 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1049 tmp = get_address(s, 0, b2, d2);
1050 potential_page_fault(s);
1051 gen_helper_sacf(cpu_env, tmp);
1052 tcg_temp_free_i64(tmp);
1053 /* addressing mode has changed, so end the block */
1054 s->pc = s->next_pc;
1055 update_psw_addr(s);
1056 s->is_jmp = DISAS_JUMP;
1057 break;
1058 case 0x7d: /* STSI D2,(B2) [S] */
1059 check_privileged(s);
1060 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1061 tmp = get_address(s, 0, b2, d2);
1062 tmp32_1 = load_reg32(0);
1063 tmp32_2 = load_reg32(1);
1064 potential_page_fault(s);
1065 gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
1066 set_cc_static(s);
1067 tcg_temp_free_i64(tmp);
1068 tcg_temp_free_i32(tmp32_1);
1069 tcg_temp_free_i32(tmp32_2);
1070 break;
1071 case 0xb1: /* STFL D2(B2) [S] */
1072 /* Store Facility List (CPU features) at 200 */
1073 check_privileged(s);
1074 tmp2 = tcg_const_i64(0xc0000000);
1075 tmp = tcg_const_i64(200);
1076 tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1077 tcg_temp_free_i64(tmp2);
1078 tcg_temp_free_i64(tmp);
1079 break;
1080 case 0xb2: /* LPSWE D2(B2) [S] */
1081 /* Load PSW Extended */
1082 check_privileged(s);
1083 decode_rs(s, insn, &r1, &r3, &b2, &d2);
1084 tmp = get_address(s, 0, b2, d2);
1085 tmp2 = tcg_temp_new_i64();
1086 tmp3 = tcg_temp_new_i64();
1087 tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
1088 tcg_gen_addi_i64(tmp, tmp, 8);
1089 tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
1090 gen_helper_load_psw(cpu_env, tmp2, tmp3);
1091 /* we need to keep cc_op intact */
1092 s->is_jmp = DISAS_JUMP;
1093 tcg_temp_free_i64(tmp);
1094 tcg_temp_free_i64(tmp2);
1095 tcg_temp_free_i64(tmp3);
1096 break;
1097 case 0x20: /* SERVC R1,R2 [RRE] */
1098 /* SCLP Service call (PV hypercall) */
1099 check_privileged(s);
1100 potential_page_fault(s);
1101 tmp32_1 = load_reg32(r2);
1102 tmp = load_reg(r1);
1103 gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
1104 set_cc_static(s);
1105 tcg_temp_free_i32(tmp32_1);
1106 tcg_temp_free_i64(tmp);
1107 break;
1108 default:
1109 #endif
1110 LOG_DISAS("illegal b2 operation 0x%x\n", op);
1111 gen_illegal_opcode(s);
1112 #ifndef CONFIG_USER_ONLY
1113 break;
1114 }
1115 #endif
1116 }
1117
1118 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
1119 {
1120 unsigned char opc;
1121 uint64_t insn;
1122 int op;
1123
1124 opc = cpu_ldub_code(env, s->pc);
1125 LOG_DISAS("opc 0x%x\n", opc);
1126
1127 switch (opc) {
1128 case 0xb2:
1129 insn = ld_code4(env, s->pc);
1130 op = (insn >> 16) & 0xff;
1131 disas_b2(env, s, op, insn);
1132 break;
1133 default:
1134 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
1135 gen_illegal_opcode(s);
1136 break;
1137 }
1138 }
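/* disas_b2/disas_s390_insn above are left over from the old hand-written
   decoder; instructions are gradually being moved to the table-driven
   DisasInsn machinery defined below (this commit converts CSP, see
   op_csp). */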
1139
1140 /* ====================================================================== */
1141 /* Define the insn format enumeration. */
1142 #define F0(N) FMT_##N,
1143 #define F1(N, X1) F0(N)
1144 #define F2(N, X1, X2) F0(N)
1145 #define F3(N, X1, X2, X3) F0(N)
1146 #define F4(N, X1, X2, X3, X4) F0(N)
1147 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1148
1149 typedef enum {
1150 #include "insn-format.def"
1151 } DisasFormat;
1152
1153 #undef F0
1154 #undef F1
1155 #undef F2
1156 #undef F3
1157 #undef F4
1158 #undef F5
1159
1160 /* Define a structure to hold the decoded fields. We'll store each inside
1161 an array indexed by an enum. In order to conserve memory, we'll arrange
1162 for fields that do not exist at the same time to overlap, thus the "C"
1163 for compact. For checking purposes there is an "O" for original index
1164 as well that will be applied to availability bitmaps. */
1165
1166 enum DisasFieldIndexO {
1167 FLD_O_r1,
1168 FLD_O_r2,
1169 FLD_O_r3,
1170 FLD_O_m1,
1171 FLD_O_m3,
1172 FLD_O_m4,
1173 FLD_O_b1,
1174 FLD_O_b2,
1175 FLD_O_b4,
1176 FLD_O_d1,
1177 FLD_O_d2,
1178 FLD_O_d4,
1179 FLD_O_x2,
1180 FLD_O_l1,
1181 FLD_O_l2,
1182 FLD_O_i1,
1183 FLD_O_i2,
1184 FLD_O_i3,
1185 FLD_O_i4,
1186 FLD_O_i5
1187 };
1188
1189 enum DisasFieldIndexC {
1190 FLD_C_r1 = 0,
1191 FLD_C_m1 = 0,
1192 FLD_C_b1 = 0,
1193 FLD_C_i1 = 0,
1194
1195 FLD_C_r2 = 1,
1196 FLD_C_b2 = 1,
1197 FLD_C_i2 = 1,
1198
1199 FLD_C_r3 = 2,
1200 FLD_C_m3 = 2,
1201 FLD_C_i3 = 2,
1202
1203 FLD_C_m4 = 3,
1204 FLD_C_b4 = 3,
1205 FLD_C_i4 = 3,
1206 FLD_C_l1 = 3,
1207
1208 FLD_C_i5 = 4,
1209 FLD_C_d1 = 4,
1210
1211 FLD_C_d2 = 5,
1212
1213 FLD_C_d4 = 6,
1214 FLD_C_x2 = 6,
1215 FLD_C_l2 = 6,
1216
1217 NUM_C_FIELD = 7
1218 };
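/* Example of the overlap: an RX-format insn uses r1 (slot 0), b2 (slot 1),
   d2 (slot 5) and x2 (slot 6); fields that never occur in the same
   format, such as r1/m1/b1/i1, share a compact slot. */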
1219
1220 struct DisasFields {
1221 unsigned op:8;
1222 unsigned op2:8;
1223 unsigned presentC:16;
1224 unsigned int presentO;
1225 int c[NUM_C_FIELD];
1226 };
1227
1228 /* This is the way fields are to be accessed out of DisasFields. */
1229 #define have_field(S, F) have_field1((S), FLD_O_##F)
1230 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1231
1232 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1233 {
1234 return (f->presentO >> c) & 1;
1235 }
1236
1237 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1238 enum DisasFieldIndexC c)
1239 {
1240 assert(have_field1(f, o));
1241 return f->c[c];
1242 }
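/* E.g. get_field(s->fields, r1) expands to
   get_field1(s->fields, FLD_O_r1, FLD_C_r1) and asserts that the
   instruction format actually provides an r1 field. */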
1243
1244 /* Describe the layout of each field in each format. */
1245 typedef struct DisasField {
1246 unsigned int beg:8;
1247 unsigned int size:8;
1248 unsigned int type:2;
1249 unsigned int indexC:6;
1250 enum DisasFieldIndexO indexO:8;
1251 } DisasField;
1252
1253 typedef struct DisasFormatInfo {
1254 DisasField op[NUM_C_FIELD];
1255 } DisasFormatInfo;
1256
1257 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1258 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1259 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1260 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1261 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1262 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1263 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1264 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1265 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1266 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1267 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1268 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1269 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1270 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1271
1272 #define F0(N) { { } },
1273 #define F1(N, X1) { { X1 } },
1274 #define F2(N, X1, X2) { { X1, X2 } },
1275 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1276 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1277 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1278
1279 static const DisasFormatInfo format_info[] = {
1280 #include "insn-format.def"
1281 };
1282
1283 #undef F0
1284 #undef F1
1285 #undef F2
1286 #undef F3
1287 #undef F4
1288 #undef F5
1289 #undef R
1290 #undef M
1291 #undef BD
1292 #undef BXD
1293 #undef BDL
1294 #undef BXDL
1295 #undef I
1296 #undef L
1297
1298 /* Generally, we'll extract operands into these structures, operate upon
1299 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1300 of routines below for more details. */
1301 typedef struct {
1302 bool g_out, g_out2, g_in1, g_in2;
1303 TCGv_i64 out, out2, in1, in2;
1304 TCGv_i64 addr1;
1305 } DisasOps;
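/* As with DisasCompare, the g_* flags mark values that are globals
   (e.g. guest registers) and therefore must not be freed or modified
   in place; see the assert in op_andi below. */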
1306
1307 /* Return values from translate_one, indicating the state of the TB. */
1308 typedef enum {
1309 /* Continue the TB. */
1310 NO_EXIT,
1311 /* We have emitted one or more goto_tb. No fixup required. */
1312 EXIT_GOTO_TB,
1313 /* We are not using a goto_tb (for whatever reason), but have updated
1314 the PC (for whatever reason), so there's no need to do it again on
1315 exiting the TB. */
1316 EXIT_PC_UPDATED,
1317 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1318 updated the PC for the next instruction to be executed. */
1319 EXIT_PC_STALE,
1320 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1321 No following code will be executed. */
1322 EXIT_NORETURN,
1323 } ExitStatus;
1324
1325 typedef enum DisasFacility {
1326 FAC_Z, /* zarch (default) */
1327 FAC_CASS, /* compare and swap and store */
1328     FAC_CASS2,              /* compare and swap and store 2 */
1329 FAC_DFP, /* decimal floating point */
1330 FAC_DFPR, /* decimal floating point rounding */
1331 FAC_DO, /* distinct operands */
1332 FAC_EE, /* execute extensions */
1333 FAC_EI, /* extended immediate */
1334 FAC_FPE, /* floating point extension */
1335 FAC_FPSSH, /* floating point support sign handling */
1336 FAC_FPRGR, /* FPR-GR transfer */
1337 FAC_GIE, /* general instructions extension */
1338 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1339 FAC_HW, /* high-word */
1340     FAC_IEEEE_SIM,          /* IEEE exception simulation */
1341 FAC_LOC, /* load/store on condition */
1342 FAC_LD, /* long displacement */
1343 FAC_PC, /* population count */
1344 FAC_SCF, /* store clock fast */
1345 FAC_SFLE, /* store facility list extended */
1346 } DisasFacility;
1347
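/* One decode-table entry per instruction.  The help_* hooks split an
   insn into phases: in1/in2 load the inputs, prep sets up the output,
   op does the actual work, wout stores the result and cout computes the
   CC; see the "in1", "in2", "prep", "wout" note above DisasOps. */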
1348 struct DisasInsn {
1349 unsigned opc:16;
1350 DisasFormat fmt:6;
1351 DisasFacility fac:6;
1352
1353 const char *name;
1354
1355 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1356 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1357 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1358 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1359 void (*help_cout)(DisasContext *, DisasOps *);
1360 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1361
1362 uint64_t data;
1363 };
1364
1365 /* ====================================================================== */
1366 /* Miscellaneous helpers, used by several operations.  */
1367
1368 static void help_l2_shift(DisasContext *s, DisasFields *f,
1369 DisasOps *o, int mask)
1370 {
1371 int b2 = get_field(f, b2);
1372 int d2 = get_field(f, d2);
1373
1374 if (b2 == 0) {
1375 o->in2 = tcg_const_i64(d2 & mask);
1376 } else {
1377 o->in2 = get_address(s, 0, b2, d2);
1378 tcg_gen_andi_i64(o->in2, o->in2, mask);
1379 }
1380 }
1381
1382 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1383 {
1384 if (dest == s->next_pc) {
1385 return NO_EXIT;
1386 }
1387 if (use_goto_tb(s, dest)) {
1388 gen_update_cc_op(s);
1389 tcg_gen_goto_tb(0);
1390 tcg_gen_movi_i64(psw_addr, dest);
1391 tcg_gen_exit_tb((tcg_target_long)s->tb);
1392 return EXIT_GOTO_TB;
1393 } else {
1394 tcg_gen_movi_i64(psw_addr, dest);
1395 return EXIT_PC_UPDATED;
1396 }
1397 }
1398
1399 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1400 bool is_imm, int imm, TCGv_i64 cdest)
1401 {
1402 ExitStatus ret;
1403 uint64_t dest = s->pc + 2 * imm;
1404 int lab;
1405
1406 /* Take care of the special cases first. */
1407 if (c->cond == TCG_COND_NEVER) {
1408 ret = NO_EXIT;
1409 goto egress;
1410 }
1411 if (is_imm) {
1412 if (dest == s->next_pc) {
1413 /* Branch to next. */
1414 ret = NO_EXIT;
1415 goto egress;
1416 }
1417 if (c->cond == TCG_COND_ALWAYS) {
1418 ret = help_goto_direct(s, dest);
1419 goto egress;
1420 }
1421 } else {
1422 if (TCGV_IS_UNUSED_I64(cdest)) {
1423 /* E.g. bcr %r0 -> no branch. */
1424 ret = NO_EXIT;
1425 goto egress;
1426 }
1427 if (c->cond == TCG_COND_ALWAYS) {
1428 tcg_gen_mov_i64(psw_addr, cdest);
1429 ret = EXIT_PC_UPDATED;
1430 goto egress;
1431 }
1432 }
1433
1434 if (use_goto_tb(s, s->next_pc)) {
1435 if (is_imm && use_goto_tb(s, dest)) {
1436 /* Both exits can use goto_tb. */
1437 gen_update_cc_op(s);
1438
1439 lab = gen_new_label();
1440 if (c->is_64) {
1441 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1442 } else {
1443 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1444 }
1445
1446 /* Branch not taken. */
1447 tcg_gen_goto_tb(0);
1448 tcg_gen_movi_i64(psw_addr, s->next_pc);
1449 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1450
1451 /* Branch taken. */
1452 gen_set_label(lab);
1453 tcg_gen_goto_tb(1);
1454 tcg_gen_movi_i64(psw_addr, dest);
1455 tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
1456
1457 ret = EXIT_GOTO_TB;
1458 } else {
1459 /* Fallthru can use goto_tb, but taken branch cannot. */
1460 /* Store taken branch destination before the brcond. This
1461 avoids having to allocate a new local temp to hold it.
1462 We'll overwrite this in the not taken case anyway. */
1463 if (!is_imm) {
1464 tcg_gen_mov_i64(psw_addr, cdest);
1465 }
1466
1467 lab = gen_new_label();
1468 if (c->is_64) {
1469 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1470 } else {
1471 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1472 }
1473
1474 /* Branch not taken. */
1475 gen_update_cc_op(s);
1476 tcg_gen_goto_tb(0);
1477 tcg_gen_movi_i64(psw_addr, s->next_pc);
1478 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1479
1480 gen_set_label(lab);
1481 if (is_imm) {
1482 tcg_gen_movi_i64(psw_addr, dest);
1483 }
1484 ret = EXIT_PC_UPDATED;
1485 }
1486 } else {
1487 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1488 Most commonly we're single-stepping or some other condition that
1489 disables all use of goto_tb. Just update the PC and exit. */
1490
1491 TCGv_i64 next = tcg_const_i64(s->next_pc);
1492 if (is_imm) {
1493 cdest = tcg_const_i64(dest);
1494 }
1495
1496 if (c->is_64) {
1497 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1498 cdest, next);
1499 } else {
1500 TCGv_i32 t0 = tcg_temp_new_i32();
1501 TCGv_i64 t1 = tcg_temp_new_i64();
1502 TCGv_i64 z = tcg_const_i64(0);
1503 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1504 tcg_gen_extu_i32_i64(t1, t0);
1505 tcg_temp_free_i32(t0);
1506 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1507 tcg_temp_free_i64(t1);
1508 tcg_temp_free_i64(z);
1509 }
1510
1511 if (is_imm) {
1512 tcg_temp_free_i64(cdest);
1513 }
1514 tcg_temp_free_i64(next);
1515
1516 ret = EXIT_PC_UPDATED;
1517 }
1518
1519 egress:
1520 free_compare(c);
1521 return ret;
1522 }
1523
1524 /* ====================================================================== */
1525 /* The operations. These perform the bulk of the work for any insn,
1526 usually after the operands have been loaded and output initialized. */
1527
1528 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1529 {
1530 gen_helper_abs_i64(o->out, o->in2);
1531 return NO_EXIT;
1532 }
1533
1534 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1535 {
1536 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1537 return NO_EXIT;
1538 }
1539
1540 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1541 {
1542 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1543 return NO_EXIT;
1544 }
1545
1546 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1547 {
1548 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1549 tcg_gen_mov_i64(o->out2, o->in2);
1550 return NO_EXIT;
1551 }
1552
1553 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1554 {
1555 tcg_gen_add_i64(o->out, o->in1, o->in2);
1556 return NO_EXIT;
1557 }
1558
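/* ADD LOGICAL WITH CARRY: the incoming carry is bit 1 of the current CC
   (CC 2 or 3 mean "carry"), which is why the computed CC is shifted
   right by one and added in below. */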
1559 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1560 {
1561 TCGv_i64 cc;
1562
1563 tcg_gen_add_i64(o->out, o->in1, o->in2);
1564
1565 /* XXX possible optimization point */
1566 gen_op_calc_cc(s);
1567 cc = tcg_temp_new_i64();
1568 tcg_gen_extu_i32_i64(cc, cc_op);
1569 tcg_gen_shri_i64(cc, cc, 1);
1570
1571 tcg_gen_add_i64(o->out, o->out, cc);
1572 tcg_temp_free_i64(cc);
1573 return NO_EXIT;
1574 }
1575
1576 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1577 {
1578 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1579 return NO_EXIT;
1580 }
1581
1582 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1583 {
1584 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1585 return NO_EXIT;
1586 }
1587
1588 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1589 {
1590 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1591 return_low128(o->out2);
1592 return NO_EXIT;
1593 }
1594
1595 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1596 {
1597 tcg_gen_and_i64(o->out, o->in1, o->in2);
1598 return NO_EXIT;
1599 }
1600
1601 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1602 {
1603 int shift = s->insn->data & 0xff;
1604 int size = s->insn->data >> 8;
1605 uint64_t mask = ((1ull << size) - 1) << shift;
1606
1607 assert(!o->g_in2);
1608 tcg_gen_shli_i64(o->in2, o->in2, shift);
1609 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1610 tcg_gen_and_i64(o->out, o->in1, o->in2);
1611
1612 /* Produce the CC from only the bits manipulated. */
1613 tcg_gen_andi_i64(cc_dst, o->out, mask);
1614 set_cc_nz_u64(s, cc_dst);
1615 return NO_EXIT;
1616 }
1617
1618 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1619 {
1620 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1621 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1622 tcg_gen_mov_i64(psw_addr, o->in2);
1623 return EXIT_PC_UPDATED;
1624 } else {
1625 return NO_EXIT;
1626 }
1627 }
1628
1629 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1630 {
1631 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1632 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1633 }
1634
1635 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1636 {
1637 int m1 = get_field(s->fields, m1);
1638 bool is_imm = have_field(s->fields, i2);
1639 int imm = is_imm ? get_field(s->fields, i2) : 0;
1640 DisasCompare c;
1641
1642 disas_jcc(s, &c, m1);
1643 return help_branch(s, &c, is_imm, imm, o->in2);
1644 }
1645
1646 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1647 {
1648 int r1 = get_field(s->fields, r1);
1649 bool is_imm = have_field(s->fields, i2);
1650 int imm = is_imm ? get_field(s->fields, i2) : 0;
1651 DisasCompare c;
1652 TCGv_i64 t;
1653
1654 c.cond = TCG_COND_NE;
1655 c.is_64 = false;
1656 c.g1 = false;
1657 c.g2 = false;
1658
1659 t = tcg_temp_new_i64();
1660 tcg_gen_subi_i64(t, regs[r1], 1);
1661 store_reg32_i64(r1, t);
1662 c.u.s32.a = tcg_temp_new_i32();
1663 c.u.s32.b = tcg_const_i32(0);
1664 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1665 tcg_temp_free_i64(t);
1666
1667 return help_branch(s, &c, is_imm, imm, o->in2);
1668 }
1669
1670 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1671 {
1672 int r1 = get_field(s->fields, r1);
1673 bool is_imm = have_field(s->fields, i2);
1674 int imm = is_imm ? get_field(s->fields, i2) : 0;
1675 DisasCompare c;
1676
1677 c.cond = TCG_COND_NE;
1678 c.is_64 = true;
1679 c.g1 = true;
1680 c.g2 = false;
1681
1682 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1683 c.u.s64.a = regs[r1];
1684 c.u.s64.b = tcg_const_i64(0);
1685
1686 return help_branch(s, &c, is_imm, imm, o->in2);
1687 }
1688
1689 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1690 {
1691 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1692 set_cc_static(s);
1693 return NO_EXIT;
1694 }
1695
1696 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1697 {
1698 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1699 set_cc_static(s);
1700 return NO_EXIT;
1701 }
1702
1703 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1704 {
1705 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1706 set_cc_static(s);
1707 return NO_EXIT;
1708 }
1709
1710 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1711 {
1712 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1713 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1714 tcg_temp_free_i32(m3);
1715 gen_set_cc_nz_f32(s, o->in2);
1716 return NO_EXIT;
1717 }
1718
1719 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1720 {
1721 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1722 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1723 tcg_temp_free_i32(m3);
1724 gen_set_cc_nz_f64(s, o->in2);
1725 return NO_EXIT;
1726 }
1727
1728 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1729 {
1730 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1731 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1732 tcg_temp_free_i32(m3);
1733 gen_set_cc_nz_f128(s, o->in1, o->in2);
1734 return NO_EXIT;
1735 }
1736
1737 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1738 {
1739 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1740 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1741 tcg_temp_free_i32(m3);
1742 gen_set_cc_nz_f32(s, o->in2);
1743 return NO_EXIT;
1744 }
1745
1746 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1747 {
1748 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1749 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1750 tcg_temp_free_i32(m3);
1751 gen_set_cc_nz_f64(s, o->in2);
1752 return NO_EXIT;
1753 }
1754
1755 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1756 {
1757 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1758 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1759 tcg_temp_free_i32(m3);
1760 gen_set_cc_nz_f128(s, o->in1, o->in2);
1761 return NO_EXIT;
1762 }
1763
1764 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1765 {
1766 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1767 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1768 tcg_temp_free_i32(m3);
1769 return NO_EXIT;
1770 }
1771
1772 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1773 {
1774 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1775 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1776 tcg_temp_free_i32(m3);
1777 return NO_EXIT;
1778 }
1779
1780 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1781 {
1782 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1783 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1784 tcg_temp_free_i32(m3);
1785 return_low128(o->out2);
1786 return NO_EXIT;
1787 }
1788
1789 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1790 {
1791 int r2 = get_field(s->fields, r2);
1792 TCGv_i64 len = tcg_temp_new_i64();
1793
1794 potential_page_fault(s);
1795 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1796 set_cc_static(s);
1797 return_low128(o->out);
1798
1799 tcg_gen_add_i64(regs[r2], regs[r2], len);
1800 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1801 tcg_temp_free_i64(len);
1802
1803 return NO_EXIT;
1804 }
1805
1806 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1807 {
1808 int l = get_field(s->fields, l1);
1809 TCGv_i32 vl;
1810
1811 switch (l + 1) {
1812 case 1:
1813 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1814 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1815 break;
1816 case 2:
1817 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1818 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1819 break;
1820 case 4:
1821 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1822 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1823 break;
1824 case 8:
1825 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1826 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1827 break;
1828 default:
1829 potential_page_fault(s);
1830 vl = tcg_const_i32(l);
1831 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1832 tcg_temp_free_i32(vl);
1833 set_cc_static(s);
1834 return NO_EXIT;
1835 }
1836 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1837 return NO_EXIT;
1838 }
1839
1840 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1841 {
1842 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1843 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1844 potential_page_fault(s);
1845 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1846 tcg_temp_free_i32(r1);
1847 tcg_temp_free_i32(r3);
1848 set_cc_static(s);
1849 return NO_EXIT;
1850 }
1851
1852 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1853 {
1854 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1855 TCGv_i32 t1 = tcg_temp_new_i32();
1856 tcg_gen_trunc_i64_i32(t1, o->in1);
1857 potential_page_fault(s);
1858 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1859 set_cc_static(s);
1860 tcg_temp_free_i32(t1);
1861 tcg_temp_free_i32(m3);
1862 return NO_EXIT;
1863 }
1864
1865 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1866 {
1867 potential_page_fault(s);
1868 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1869 set_cc_static(s);
1870 return_low128(o->in2);
1871 return NO_EXIT;
1872 }
1873
1874 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1875 {
1876 int r3 = get_field(s->fields, r3);
1877 potential_page_fault(s);
1878 gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
1879 set_cc_static(s);
1880 return NO_EXIT;
1881 }
1882
1883 static ExitStatus op_csg(DisasContext *s, DisasOps *o)
1884 {
1885 int r3 = get_field(s->fields, r3);
1886 potential_page_fault(s);
1887 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
1888 set_cc_static(s);
1889 return NO_EXIT;
1890 }
1891
1892 #ifndef CONFIG_USER_ONLY
1893 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1894 {
1895 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1896 check_privileged(s);
1897 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1898 tcg_temp_free_i32(r1);
1899 set_cc_static(s);
1900 return NO_EXIT;
1901 }
1902 #endif
1903
1904 static ExitStatus op_cds(DisasContext *s, DisasOps *o)
1905 {
1906 int r3 = get_field(s->fields, r3);
1907 TCGv_i64 in3 = tcg_temp_new_i64();
1908 tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
1909 potential_page_fault(s);
1910 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
1911 tcg_temp_free_i64(in3);
1912 set_cc_static(s);
1913 return NO_EXIT;
1914 }
1915
1916 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1917 {
1918 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1919 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1920 potential_page_fault(s);
1921 /* XXX rewrite in tcg */
1922 gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
1923 set_cc_static(s);
1924 return NO_EXIT;
1925 }
1926
1927 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1928 {
1929 TCGv_i64 t1 = tcg_temp_new_i64();
1930 TCGv_i32 t2 = tcg_temp_new_i32();
1931 tcg_gen_trunc_i64_i32(t2, o->in1);
1932 gen_helper_cvd(t1, t2);
1933 tcg_temp_free_i32(t2);
1934 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1935 tcg_temp_free_i64(t1);
1936 return NO_EXIT;
1937 }
1938
1939 #ifndef CONFIG_USER_ONLY
1940 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1941 {
1942 TCGv_i32 tmp;
1943
1944 check_privileged(s);
1945 potential_page_fault(s);
1946
1947 /* We pretend the format is RX_a so that D2 is the field we want. */
1948 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1949 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1950 tcg_temp_free_i32(tmp);
1951 return NO_EXIT;
1952 }
1953 #endif
1954
1955 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
1956 {
1957 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
1958 return_low128(o->out);
1959 return NO_EXIT;
1960 }
1961
1962 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
1963 {
1964 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
1965 return_low128(o->out);
1966 return NO_EXIT;
1967 }
1968
1969 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
1970 {
1971 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
1972 return_low128(o->out);
1973 return NO_EXIT;
1974 }
1975
1976 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
1977 {
1978 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
1979 return_low128(o->out);
1980 return NO_EXIT;
1981 }
1982
1983 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
1984 {
1985 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
1986 return NO_EXIT;
1987 }
1988
1989 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
1990 {
1991 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
1992 return NO_EXIT;
1993 }
1994
1995 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
1996 {
1997 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1998 return_low128(o->out2);
1999 return NO_EXIT;
2000 }
2001
2002 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2003 {
2004 int r2 = get_field(s->fields, r2);
2005 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2006 return NO_EXIT;
2007 }
2008
2009 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2010 {
2011 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2012 return NO_EXIT;
2013 }
2014
2015 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2016 {
2017 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2018 tb->flags, (ab)use the tb->cs_base field as the address of
2019 the template in memory, and grab 8 bits of tb->flags/cflags for
2020 the contents of the register. We would then recognize all this
2021 in gen_intermediate_code_internal, generating code for exactly
2022 one instruction. This new TB then gets executed normally.
2023
2024 On the other hand, this seems to be mostly used for modifying
2025 MVC inside of memcpy, which needs a helper call anyway. So
2026 perhaps this doesn't bear thinking about any further. */
2027
2028 TCGv_i64 tmp;
2029
2030 update_psw_addr(s);
2031 gen_op_calc_cc(s);
2032
2033 tmp = tcg_const_i64(s->next_pc);
2034 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2035 tcg_temp_free_i64(tmp);
2036
2037 set_cc_static(s);
2038 return NO_EXIT;
2039 }
2040
2041 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2042 {
2043 /* We'll use the original input for cc computation, since we get to
2044 compare that against 0, which ought to be better than comparing
2045 the real output against 64. It also lets cc_dst be a convenient
2046 temporary during our computation. */
2047 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2048
2049 /* R1 = IN ? CLZ(IN) : 64. */
2050 gen_helper_clz(o->out, o->in2);
2051
2052 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2053 value by 64, which is undefined. But since the shift is 64 iff the
2054 input is zero, we still get the correct result after and'ing. */
2055 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2056 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2057 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2058 return NO_EXIT;
2059 }
2060
2061 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2062 {
2063 int m3 = get_field(s->fields, m3);
2064 int pos, len, base = s->insn->data;
2065 TCGv_i64 tmp = tcg_temp_new_i64();
2066 uint64_t ccm;
2067
2068 switch (m3) {
2069 case 0xf:
2070 /* Effectively a 32-bit load. */
2071 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2072 len = 32;
2073 goto one_insert;
2074
2075 case 0xc:
2076 case 0x6:
2077 case 0x3:
2078 /* Effectively a 16-bit load. */
2079 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2080 len = 16;
2081 goto one_insert;
2082
2083 case 0x8:
2084 case 0x4:
2085 case 0x2:
2086 case 0x1:
2087 /* Effectively an 8-bit load. */
2088 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2089 len = 8;
2090 goto one_insert;
2091
2092 one_insert:
2093 pos = base + ctz32(m3) * 8;
2094 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2095 ccm = ((1ull << len) - 1) << pos;
2096 break;
2097
2098 default:
2099 /* This is going to be a sequence of loads and inserts. */
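/* Scan M3 from its most significant bit downward: each selected byte is
   loaded from the next sequential address and deposited at its byte
   position in R1, while CCM accumulates the mask of inserted bits for
   the CC computation below. */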
2100 pos = base + 32 - 8;
2101 ccm = 0;
2102 while (m3) {
2103 if (m3 & 0x8) {
2104 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2105 tcg_gen_addi_i64(o->in2, o->in2, 1);
2106 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2107 ccm |= 0xffull << pos;
2108 }
2109 m3 = (m3 << 1) & 0xf;
2110 pos -= 8;
2111 }
2112 break;
2113 }
2114
2115 tcg_gen_movi_i64(tmp, ccm);
2116 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2117 tcg_temp_free_i64(tmp);
2118 return NO_EXIT;
2119 }
2120
2121 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2122 {
2123 int shift = s->insn->data & 0xff;
2124 int size = s->insn->data >> 8;
2125 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2126 return NO_EXIT;
2127 }
2128
2129 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2130 {
2131 TCGv_i64 t1;
2132
2133 gen_op_calc_cc(s);
2134 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2135
2136 t1 = tcg_temp_new_i64();
2137 tcg_gen_shli_i64(t1, psw_mask, 20);
2138 tcg_gen_shri_i64(t1, t1, 36);
2139 tcg_gen_or_i64(o->out, o->out, t1);
2140
2141 tcg_gen_extu_i32_i64(t1, cc_op);
2142 tcg_gen_shli_i64(t1, t1, 28);
2143 tcg_gen_or_i64(o->out, o->out, t1);
2144 tcg_temp_free_i64(t1);
2145 return NO_EXIT;
2146 }
2147
2148 #ifndef CONFIG_USER_ONLY
2149 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2150 {
2151 check_privileged(s);
2152 gen_helper_ipte(cpu_env, o->in1, o->in2);
2153 return NO_EXIT;
2154 }
2155
2156 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2157 {
2158 check_privileged(s);
2159 gen_helper_iske(o->out, cpu_env, o->in2);
2160 return NO_EXIT;
2161 }
2162 #endif
2163
2164 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2165 {
2166 gen_helper_ldeb(o->out, cpu_env, o->in2);
2167 return NO_EXIT;
2168 }
2169
2170 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2171 {
2172 gen_helper_ledb(o->out, cpu_env, o->in2);
2173 return NO_EXIT;
2174 }
2175
2176 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2177 {
2178 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2179 return NO_EXIT;
2180 }
2181
2182 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2183 {
2184 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2185 return NO_EXIT;
2186 }
2187
2188 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2189 {
2190 gen_helper_lxdb(o->out, cpu_env, o->in2);
2191 return_low128(o->out2);
2192 return NO_EXIT;
2193 }
2194
2195 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2196 {
2197 gen_helper_lxeb(o->out, cpu_env, o->in2);
2198 return_low128(o->out2);
2199 return NO_EXIT;
2200 }
2201
2202 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2203 {
2204 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2205 return NO_EXIT;
2206 }
2207
2208 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2209 {
2210 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2211 return NO_EXIT;
2212 }
2213
2214 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2215 {
2216 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2217 return NO_EXIT;
2218 }
2219
2220 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2221 {
2222 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2223 return NO_EXIT;
2224 }
2225
2226 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2227 {
2228 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2229 return NO_EXIT;
2230 }
2231
2232 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2233 {
2234 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2235 return NO_EXIT;
2236 }
2237
2238 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2239 {
2240 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2241 return NO_EXIT;
2242 }
2243
2244 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2245 {
2246 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2247 return NO_EXIT;
2248 }
2249
2250 #ifndef CONFIG_USER_ONLY
2251 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2252 {
2253 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2254 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2255 check_privileged(s);
2256 potential_page_fault(s);
2257 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2258 tcg_temp_free_i32(r1);
2259 tcg_temp_free_i32(r3);
2260 return NO_EXIT;
2261 }
2262
2263 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2264 {
2265 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2266 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2267 check_privileged(s);
2268 potential_page_fault(s);
2269 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2270 tcg_temp_free_i32(r1);
2271 tcg_temp_free_i32(r3);
2272 return NO_EXIT;
2273 }
2274 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2275 {
2276 check_privileged(s);
2277 potential_page_fault(s);
2278 gen_helper_lra(o->out, cpu_env, o->in2);
2279 set_cc_static(s);
2280 return NO_EXIT;
2281 }
2282
2283 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2284 {
2285 TCGv_i64 t1, t2;
2286
2287 check_privileged(s);
2288
2289 t1 = tcg_temp_new_i64();
2290 t2 = tcg_temp_new_i64();
2291 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2292 tcg_gen_addi_i64(o->in2, o->in2, 4);
2293 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2294 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2295 tcg_gen_shli_i64(t1, t1, 32);
2296 gen_helper_load_psw(cpu_env, t1, t2);
2297 tcg_temp_free_i64(t1);
2298 tcg_temp_free_i64(t2);
2299 return EXIT_NORETURN;
2300 }
2301 #endif
2302
2303 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2304 {
2305 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2306 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2307 potential_page_fault(s);
2308 gen_helper_lam(cpu_env, r1, o->in2, r3);
2309 tcg_temp_free_i32(r1);
2310 tcg_temp_free_i32(r3);
2311 return NO_EXIT;
2312 }
2313
2314 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2315 {
2316 int r1 = get_field(s->fields, r1);
2317 int r3 = get_field(s->fields, r3);
2318 TCGv_i64 t = tcg_temp_new_i64();
2319 TCGv_i64 t4 = tcg_const_i64(4);
2320
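/* Load the low 32 bits of registers r1 through r3 (wrapping from 15
   back to 0) from successive words at the second-operand address. */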
2321 while (1) {
2322 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2323 store_reg32_i64(r1, t);
2324 if (r1 == r3) {
2325 break;
2326 }
2327 tcg_gen_add_i64(o->in2, o->in2, t4);
2328 r1 = (r1 + 1) & 15;
2329 }
2330
2331 tcg_temp_free_i64(t);
2332 tcg_temp_free_i64(t4);
2333 return NO_EXIT;
2334 }
2335
2336 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2337 {
2338 int r1 = get_field(s->fields, r1);
2339 int r3 = get_field(s->fields, r3);
2340 TCGv_i64 t = tcg_temp_new_i64();
2341 TCGv_i64 t4 = tcg_const_i64(4);
2342
2343 while (1) {
2344 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2345 store_reg32h_i64(r1, t);
2346 if (r1 == r3) {
2347 break;
2348 }
2349 tcg_gen_add_i64(o->in2, o->in2, t4);
2350 r1 = (r1 + 1) & 15;
2351 }
2352
2353 tcg_temp_free_i64(t);
2354 tcg_temp_free_i64(t4);
2355 return NO_EXIT;
2356 }
2357
2358 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2359 {
2360 int r1 = get_field(s->fields, r1);
2361 int r3 = get_field(s->fields, r3);
2362 TCGv_i64 t8 = tcg_const_i64(8);
2363
2364 while (1) {
2365 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2366 if (r1 == r3) {
2367 break;
2368 }
2369 tcg_gen_add_i64(o->in2, o->in2, t8);
2370 r1 = (r1 + 1) & 15;
2371 }
2372
2373 tcg_temp_free_i64(t8);
2374 return NO_EXIT;
2375 }
2376
2377 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2378 {
2379 o->out = o->in2;
2380 o->g_out = o->g_in2;
2381 TCGV_UNUSED_I64(o->in2);
2382 o->g_in2 = false;
2383 return NO_EXIT;
2384 }
2385
2386 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2387 {
2388 o->out = o->in1;
2389 o->out2 = o->in2;
2390 o->g_out = o->g_in1;
2391 o->g_out2 = o->g_in2;
2392 TCGV_UNUSED_I64(o->in1);
2393 TCGV_UNUSED_I64(o->in2);
2394 o->g_in1 = o->g_in2 = false;
2395 return NO_EXIT;
2396 }
2397
2398 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2399 {
2400 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2401 potential_page_fault(s);
2402 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2403 tcg_temp_free_i32(l);
2404 return NO_EXIT;
2405 }
2406
2407 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2408 {
2409 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2410 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2411 potential_page_fault(s);
2412 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2413 tcg_temp_free_i32(r1);
2414 tcg_temp_free_i32(r2);
2415 set_cc_static(s);
2416 return NO_EXIT;
2417 }
2418
2419 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2420 {
2421 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2422 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2423 potential_page_fault(s);
2424 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2425 tcg_temp_free_i32(r1);
2426 tcg_temp_free_i32(r3);
2427 set_cc_static(s);
2428 return NO_EXIT;
2429 }
2430
2431 #ifndef CONFIG_USER_ONLY
2432 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2433 {
2434 int r1 = get_field(s->fields, l1);
2435 check_privileged(s);
2436 potential_page_fault(s);
2437 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2438 set_cc_static(s);
2439 return NO_EXIT;
2440 }
2441
2442 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2443 {
2444 int r1 = get_field(s->fields, l1);
2445 check_privileged(s);
2446 potential_page_fault(s);
2447 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2448 set_cc_static(s);
2449 return NO_EXIT;
2450 }
2451 #endif
2452
2453 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2454 {
2455 potential_page_fault(s);
2456 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2457 set_cc_static(s);
2458 return NO_EXIT;
2459 }
2460
2461 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2462 {
2463 potential_page_fault(s);
2464 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2465 set_cc_static(s);
2466 return_low128(o->in2);
2467 return NO_EXIT;
2468 }
2469
2470 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2471 {
2472 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2473 return NO_EXIT;
2474 }
2475
2476 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2477 {
2478 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
2479 return_low128(o->out2);
2480 return NO_EXIT;
2481 }
2482
2483 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2484 {
2485 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2486 return NO_EXIT;
2487 }
2488
2489 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2490 {
2491 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2492 return NO_EXIT;
2493 }
2494
2495 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2496 {
2497 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2498 return NO_EXIT;
2499 }
2500
2501 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2502 {
2503 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2504 return_low128(o->out2);
2505 return NO_EXIT;
2506 }
2507
2508 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2509 {
2510 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2511 return_low128(o->out2);
2512 return NO_EXIT;
2513 }
2514
2515 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2516 {
2517 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2518 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2519 tcg_temp_free_i64(r3);
2520 return NO_EXIT;
2521 }
2522
2523 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2524 {
2525 int r3 = get_field(s->fields, r3);
2526 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2527 return NO_EXIT;
2528 }
2529
2530 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2531 {
2532 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2533 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2534 tcg_temp_free_i64(r3);
2535 return NO_EXIT;
2536 }
2537
2538 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2539 {
2540 int r3 = get_field(s->fields, r3);
2541 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2542 return NO_EXIT;
2543 }
2544
2545 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2546 {
2547 gen_helper_nabs_i64(o->out, o->in2);
2548 return NO_EXIT;
2549 }
2550
2551 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2552 {
2553 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2554 return NO_EXIT;
2555 }
2556
2557 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2558 {
2559 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2560 return NO_EXIT;
2561 }
2562
2563 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2564 {
2565 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2566 tcg_gen_mov_i64(o->out2, o->in2);
2567 return NO_EXIT;
2568 }
2569
2570 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2571 {
2572 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2573 potential_page_fault(s);
2574 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2575 tcg_temp_free_i32(l);
2576 set_cc_static(s);
2577 return NO_EXIT;
2578 }
2579
2580 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2581 {
2582 tcg_gen_neg_i64(o->out, o->in2);
2583 return NO_EXIT;
2584 }
2585
2586 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2587 {
2588 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2589 return NO_EXIT;
2590 }
2591
2592 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2593 {
2594 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2595 return NO_EXIT;
2596 }
2597
2598 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2599 {
2600 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2601 tcg_gen_mov_i64(o->out2, o->in2);
2602 return NO_EXIT;
2603 }
2604
2605 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2606 {
2607 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2608 potential_page_fault(s);
2609 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2610 tcg_temp_free_i32(l);
2611 set_cc_static(s);
2612 return NO_EXIT;
2613 }
2614
2615 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2616 {
2617 tcg_gen_or_i64(o->out, o->in1, o->in2);
2618 return NO_EXIT;
2619 }
2620
2621 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2622 {
2623 int shift = s->insn->data & 0xff;
2624 int size = s->insn->data >> 8;
2625 uint64_t mask = ((1ull << size) - 1) << shift;
2626
2627 assert(!o->g_in2);
2628 tcg_gen_shli_i64(o->in2, o->in2, shift);
2629 tcg_gen_or_i64(o->out, o->in1, o->in2);
2630
2631 /* Produce the CC from only the bits manipulated. */
2632 tcg_gen_andi_i64(cc_dst, o->out, mask);
2633 set_cc_nz_u64(s, cc_dst);
2634 return NO_EXIT;
2635 }
2636
2637 #ifndef CONFIG_USER_ONLY
2638 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2639 {
2640 check_privileged(s);
2641 gen_helper_ptlb(cpu_env);
2642 return NO_EXIT;
2643 }
2644 #endif
2645
2646 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2647 {
2648 tcg_gen_bswap16_i64(o->out, o->in2);
2649 return NO_EXIT;
2650 }
2651
2652 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2653 {
2654 tcg_gen_bswap32_i64(o->out, o->in2);
2655 return NO_EXIT;
2656 }
2657
2658 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2659 {
2660 tcg_gen_bswap64_i64(o->out, o->in2);
2661 return NO_EXIT;
2662 }
2663
2664 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2665 {
2666 TCGv_i32 t1 = tcg_temp_new_i32();
2667 TCGv_i32 t2 = tcg_temp_new_i32();
2668 TCGv_i32 to = tcg_temp_new_i32();
2669 tcg_gen_trunc_i64_i32(t1, o->in1);
2670 tcg_gen_trunc_i64_i32(t2, o->in2);
2671 tcg_gen_rotl_i32(to, t1, t2);
2672 tcg_gen_extu_i32_i64(o->out, to);
2673 tcg_temp_free_i32(t1);
2674 tcg_temp_free_i32(t2);
2675 tcg_temp_free_i32(to);
2676 return NO_EXIT;
2677 }
2678
2679 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2680 {
2681 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2682 return NO_EXIT;
2683 }
2684
2685 #ifndef CONFIG_USER_ONLY
2686 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2687 {
2688 check_privileged(s);
2689 gen_helper_rrbe(cc_op, cpu_env, o->in2);
2690 set_cc_static(s);
2691 return NO_EXIT;
2692 }
2693 #endif
2694
2695 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2696 {
2697 int r1 = get_field(s->fields, r1);
2698 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
2699 return NO_EXIT;
2700 }
2701
2702 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2703 {
2704 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2705 return NO_EXIT;
2706 }
2707
2708 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2709 {
2710 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2711 return NO_EXIT;
2712 }
2713
2714 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2715 {
2716 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2717 return_low128(o->out2);
2718 return NO_EXIT;
2719 }
2720
2721 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2722 {
2723 gen_helper_sqeb(o->out, cpu_env, o->in2);
2724 return NO_EXIT;
2725 }
2726
2727 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2728 {
2729 gen_helper_sqdb(o->out, cpu_env, o->in2);
2730 return NO_EXIT;
2731 }
2732
2733 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2734 {
2735 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2736 return_low128(o->out2);
2737 return NO_EXIT;
2738 }
2739
2740 #ifndef CONFIG_USER_ONLY
2741 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2742 {
2743 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2744 check_privileged(s);
2745 potential_page_fault(s);
2746 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2747 tcg_temp_free_i32(r1);
2748 return NO_EXIT;
2749 }
2750 #endif
2751
2752 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
2753 {
2754 uint64_t sign = 1ull << s->insn->data;
2755 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
2756 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
2757 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2758 /* The arithmetic left shift is curious in that it does not affect
2759 the sign bit. Copy that over from the source unchanged. */
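/* I.e. OUT = ((IN1 << IN2) & ~SIGN) | (IN1 & SIGN), with SIGN being
   bit 31 for the 32-bit form or bit 63 for the 64-bit form. */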
2760 tcg_gen_andi_i64(o->out, o->out, ~sign);
2761 tcg_gen_andi_i64(o->in1, o->in1, sign);
2762 tcg_gen_or_i64(o->out, o->out, o->in1);
2763 return NO_EXIT;
2764 }
2765
2766 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
2767 {
2768 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2769 return NO_EXIT;
2770 }
2771
2772 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
2773 {
2774 tcg_gen_sar_i64(o->out, o->in1, o->in2);
2775 return NO_EXIT;
2776 }
2777
2778 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
2779 {
2780 tcg_gen_shr_i64(o->out, o->in1, o->in2);
2781 return NO_EXIT;
2782 }
2783
2784 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
2785 {
2786 gen_helper_sfpc(cpu_env, o->in2);
2787 return NO_EXIT;
2788 }
2789
2790 #ifndef CONFIG_USER_ONLY
2791 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
2792 {
2793 check_privileged(s);
2794 tcg_gen_shri_i64(o->in2, o->in2, 4);
2795 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
2796 return NO_EXIT;
2797 }
2798
2799 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
2800 {
2801 check_privileged(s);
2802 gen_helper_sske(cpu_env, o->in1, o->in2);
2803 return NO_EXIT;
2804 }
2805
2806 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
2807 {
2808 check_privileged(s);
2809 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
2810 return NO_EXIT;
2811 }
2812
2813 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
2814 {
2815 check_privileged(s);
2816 /* ??? Surely cpu address != cpu number. In any case the previous
2817 version of this stored more than the required half-word, so it
2818 is unlikely this has ever been tested. */
2819 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
2820 return NO_EXIT;
2821 }
2822
2823 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
2824 {
2825 gen_helper_stck(o->out, cpu_env);
2826 /* ??? We don't implement clock states. */
2827 gen_op_movi_cc(s, 0);
2828 return NO_EXIT;
2829 }
2830
2831 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
2832 {
2833 check_privileged(s);
2834 gen_helper_sckc(cpu_env, o->in2);
2835 return NO_EXIT;
2836 }
2837
2838 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
2839 {
2840 check_privileged(s);
2841 gen_helper_stckc(o->out, cpu_env);
2842 return NO_EXIT;
2843 }
2844
2845 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
2846 {
2847 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2848 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2849 check_privileged(s);
2850 potential_page_fault(s);
2851 gen_helper_stctg(cpu_env, r1, o->in2, r3);
2852 tcg_temp_free_i32(r1);
2853 tcg_temp_free_i32(r3);
2854 return NO_EXIT;
2855 }
2856
2857 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
2858 {
2859 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2860 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2861 check_privileged(s);
2862 potential_page_fault(s);
2863 gen_helper_stctl(cpu_env, r1, o->in2, r3);
2864 tcg_temp_free_i32(r1);
2865 tcg_temp_free_i32(r3);
2866 return NO_EXIT;
2867 }
2868
2869 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
2870 {
2871 check_privileged(s);
2872 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
2873 return NO_EXIT;
2874 }
2875
2876 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
2877 {
2878 check_privileged(s);
2879 gen_helper_spt(cpu_env, o->in2);
2880 return NO_EXIT;
2881 }
2882
2883 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
2884 {
2885 check_privileged(s);
2886 gen_helper_stpt(o->out, cpu_env);
2887 return NO_EXIT;
2888 }
2889
2890 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
2891 {
2892 check_privileged(s);
2893 gen_helper_spx(cpu_env, o->in2);
2894 return NO_EXIT;
2895 }
2896
2897 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
2898 {
2899 check_privileged(s);
2900 /* Not operational. */
2901 gen_op_movi_cc(s, 3);
2902 return NO_EXIT;
2903 }
2904
2905 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
2906 {
2907 check_privileged(s);
2908 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
2909 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
2910 return NO_EXIT;
2911 }
2912
2913 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
2914 {
2915 uint64_t i2 = get_field(s->fields, i2);
2916 TCGv_i64 t;
2917
2918 check_privileged(s);
2919
2920 /* It is important to do what the instruction name says: STORE THEN.
2921 If we let the output hook perform the store, then a fault and
2922 restart would leave the wrong SYSTEM MASK in place. */
2923 t = tcg_temp_new_i64();
2924 tcg_gen_shri_i64(t, psw_mask, 56);
2925 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
2926 tcg_temp_free_i64(t);
2927
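/* Opcode 0xac is STNSM, which ANDs the immediate into the system mask;
   otherwise this is STOSM, which ORs it in. */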
2928 if (s->fields->op == 0xac) {
2929 tcg_gen_andi_i64(psw_mask, psw_mask,
2930 (i2 << 56) | 0x00ffffffffffffffull);
2931 } else {
2932 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
2933 }
2934 return NO_EXIT;
2935 }
2936
2937 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
2938 {
2939 check_privileged(s);
2940 potential_page_fault(s);
2941 gen_helper_stura(cpu_env, o->in2, o->in1);
2942 return NO_EXIT;
2943 }
2944 #endif
2945
2946 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
2947 {
2948 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
2949 return NO_EXIT;
2950 }
2951
2952 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
2953 {
2954 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
2955 return NO_EXIT;
2956 }
2957
2958 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
2959 {
2960 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
2961 return NO_EXIT;
2962 }
2963
2964 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
2965 {
2966 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
2967 return NO_EXIT;
2968 }
2969
2970 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
2971 {
2972 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2973 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2974 potential_page_fault(s);
2975 gen_helper_stam(cpu_env, r1, o->in2, r3);
2976 tcg_temp_free_i32(r1);
2977 tcg_temp_free_i32(r3);
2978 return NO_EXIT;
2979 }
2980
2981 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
2982 {
2983 int m3 = get_field(s->fields, m3);
2984 int pos, base = s->insn->data;
2985 TCGv_i64 tmp = tcg_temp_new_i64();
2986
2987 pos = base + ctz32(m3) * 8;
2988 switch (m3) {
2989 case 0xf:
2990 /* Effectively a 32-bit store. */
2991 tcg_gen_shri_i64(tmp, o->in1, pos);
2992 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
2993 break;
2994
2995 case 0xc:
2996 case 0x6:
2997 case 0x3:
2998 /* Effectively a 16-bit store. */
2999 tcg_gen_shri_i64(tmp, o->in1, pos);
3000 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3001 break;
3002
3003 case 0x8:
3004 case 0x4:
3005 case 0x2:
3006 case 0x1:
3007 /* Effectively an 8-bit store. */
3008 tcg_gen_shri_i64(tmp, o->in1, pos);
3009 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3010 break;
3011
3012 default:
3013 /* This is going to be a sequence of shifts and stores. */
3014 pos = base + 32 - 8;
3015 while (m3) {
3016 if (m3 & 0x8) {
3017 tcg_gen_shri_i64(tmp, o->in1, pos);
3018 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3019 tcg_gen_addi_i64(o->in2, o->in2, 1);
3020 }
3021 m3 = (m3 << 1) & 0xf;
3022 pos -= 8;
3023 }
3024 break;
3025 }
3026 tcg_temp_free_i64(tmp);
3027 return NO_EXIT;
3028 }
3029
3030 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3031 {
3032 int r1 = get_field(s->fields, r1);
3033 int r3 = get_field(s->fields, r3);
3034 int size = s->insn->data;
3035 TCGv_i64 tsize = tcg_const_i64(size);
3036
3037 while (1) {
3038 if (size == 8) {
3039 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3040 } else {
3041 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3042 }
3043 if (r1 == r3) {
3044 break;
3045 }
3046 tcg_gen_add_i64(o->in2, o->in2, tsize);
3047 r1 = (r1 + 1) & 15;
3048 }
3049
3050 tcg_temp_free_i64(tsize);
3051 return NO_EXIT;
3052 }
3053
3054 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3055 {
3056 int r1 = get_field(s->fields, r1);
3057 int r3 = get_field(s->fields, r3);
3058 TCGv_i64 t = tcg_temp_new_i64();
3059 TCGv_i64 t4 = tcg_const_i64(4);
3060 TCGv_i64 t32 = tcg_const_i64(32);
3061
3062 while (1) {
3063 tcg_gen_shl_i64(t, regs[r1], t32);
3064 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3065 if (r1 == r3) {
3066 break;
3067 }
3068 tcg_gen_add_i64(o->in2, o->in2, t4);
3069 r1 = (r1 + 1) & 15;
3070 }
3071
3072 tcg_temp_free_i64(t);
3073 tcg_temp_free_i64(t4);
3074 tcg_temp_free_i64(t32);
3075 return NO_EXIT;
3076 }
3077
3078 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3079 {
3080 potential_page_fault(s);
3081 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3082 set_cc_static(s);
3083 return_low128(o->in2);
3084 return NO_EXIT;
3085 }
3086
3087 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3088 {
3089 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3090 return NO_EXIT;
3091 }
3092
3093 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3094 {
3095 TCGv_i64 cc;
3096
3097 assert(!o->g_in2);
3098 tcg_gen_not_i64(o->in2, o->in2);
3099 tcg_gen_add_i64(o->out, o->in1, o->in2);
3100
3101 /* XXX possible optimization point */
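/* The CC of the previous subtraction encodes the borrow: CC 0/1 mean
   borrow, CC 2/3 mean no borrow, so (cc >> 1) is the carry-in for
   IN1 + ~IN2 + carry. */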
3102 gen_op_calc_cc(s);
3103 cc = tcg_temp_new_i64();
3104 tcg_gen_extu_i32_i64(cc, cc_op);
3105 tcg_gen_shri_i64(cc, cc, 1);
3106 tcg_gen_add_i64(o->out, o->out, cc);
3107 tcg_temp_free_i64(cc);
3108 return NO_EXIT;
3109 }
3110
3111 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3112 {
3113 TCGv_i32 t;
3114
3115 update_psw_addr(s);
3116 gen_op_calc_cc(s);
3117
3118 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3119 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3120 tcg_temp_free_i32(t);
3121
3122 t = tcg_const_i32(s->next_pc - s->pc);
3123 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3124 tcg_temp_free_i32(t);
3125
3126 gen_exception(EXCP_SVC);
3127 return EXIT_NORETURN;
3128 }
3129
3130 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3131 {
3132 gen_helper_tceb(cc_op, o->in1, o->in2);
3133 set_cc_static(s);
3134 return NO_EXIT;
3135 }
3136
3137 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3138 {
3139 gen_helper_tcdb(cc_op, o->in1, o->in2);
3140 set_cc_static(s);
3141 return NO_EXIT;
3142 }
3143
3144 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3145 {
3146 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3147 set_cc_static(s);
3148 return NO_EXIT;
3149 }
3150
3151 #ifndef CONFIG_USER_ONLY
3152 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3153 {
3154 potential_page_fault(s);
3155 gen_helper_tprot(cc_op, o->addr1, o->in2);
3156 set_cc_static(s);
3157 return NO_EXIT;
3158 }
3159 #endif
3160
3161 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3162 {
3163 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3164 potential_page_fault(s);
3165 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3166 tcg_temp_free_i32(l);
3167 set_cc_static(s);
3168 return NO_EXIT;
3169 }
3170
3171 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3172 {
3173 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3174 potential_page_fault(s);
3175 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3176 tcg_temp_free_i32(l);
3177 return NO_EXIT;
3178 }
3179
3180 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3181 {
3182 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3183 potential_page_fault(s);
3184 gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
3185 tcg_temp_free_i32(l);
3186 set_cc_static(s);
3187 return NO_EXIT;
3188 }
3189
3190 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3191 {
3192 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3193 return NO_EXIT;
3194 }
3195
3196 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3197 {
3198 int shift = s->insn->data & 0xff;
3199 int size = s->insn->data >> 8;
3200 uint64_t mask = ((1ull << size) - 1) << shift;
3201
3202 assert(!o->g_in2);
3203 tcg_gen_shli_i64(o->in2, o->in2, shift);
3204 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3205
3206 /* Produce the CC from only the bits manipulated. */
3207 tcg_gen_andi_i64(cc_dst, o->out, mask);
3208 set_cc_nz_u64(s, cc_dst);
3209 return NO_EXIT;
3210 }
3211
3212 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3213 {
3214 o->out = tcg_const_i64(0);
3215 return NO_EXIT;
3216 }
3217
3218 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3219 {
3220 o->out = tcg_const_i64(0);
3221 o->out2 = o->out;
3222 o->g_out2 = true;
3223 return NO_EXIT;
3224 }
3225
3226 /* ====================================================================== */
3227 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3228 the original inputs), update the various cc data structures in order to
3229 be able to compute the new condition code. */
3230
3231 static void cout_abs32(DisasContext *s, DisasOps *o)
3232 {
3233 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3234 }
3235
3236 static void cout_abs64(DisasContext *s, DisasOps *o)
3237 {
3238 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3239 }
3240
3241 static void cout_adds32(DisasContext *s, DisasOps *o)
3242 {
3243 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3244 }
3245
3246 static void cout_adds64(DisasContext *s, DisasOps *o)
3247 {
3248 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3249 }
3250
3251 static void cout_addu32(DisasContext *s, DisasOps *o)
3252 {
3253 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3254 }
3255
3256 static void cout_addu64(DisasContext *s, DisasOps *o)
3257 {
3258 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3259 }
3260
3261 static void cout_addc32(DisasContext *s, DisasOps *o)
3262 {
3263 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3264 }
3265
3266 static void cout_addc64(DisasContext *s, DisasOps *o)
3267 {
3268 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3269 }
3270
3271 static void cout_cmps32(DisasContext *s, DisasOps *o)
3272 {
3273 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3274 }
3275
3276 static void cout_cmps64(DisasContext *s, DisasOps *o)
3277 {
3278 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3279 }
3280
3281 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3282 {
3283 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3284 }
3285
3286 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3287 {
3288 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3289 }
3290
3291 static void cout_f32(DisasContext *s, DisasOps *o)
3292 {
3293 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3294 }
3295
3296 static void cout_f64(DisasContext *s, DisasOps *o)
3297 {
3298 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3299 }
3300
3301 static void cout_f128(DisasContext *s, DisasOps *o)
3302 {
3303 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3304 }
3305
3306 static void cout_nabs32(DisasContext *s, DisasOps *o)
3307 {
3308 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3309 }
3310
3311 static void cout_nabs64(DisasContext *s, DisasOps *o)
3312 {
3313 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3314 }
3315
3316 static void cout_neg32(DisasContext *s, DisasOps *o)
3317 {
3318 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3319 }
3320
3321 static void cout_neg64(DisasContext *s, DisasOps *o)
3322 {
3323 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
3324 }
3325
3326 static void cout_nz32(DisasContext *s, DisasOps *o)
3327 {
3328 tcg_gen_ext32u_i64(cc_dst, o->out);
3329 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3330 }
3331
3332 static void cout_nz64(DisasContext *s, DisasOps *o)
3333 {
3334 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3335 }
3336
3337 static void cout_s32(DisasContext *s, DisasOps *o)
3338 {
3339 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3340 }
3341
3342 static void cout_s64(DisasContext *s, DisasOps *o)
3343 {
3344 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3345 }
3346
3347 static void cout_subs32(DisasContext *s, DisasOps *o)
3348 {
3349 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3350 }
3351
3352 static void cout_subs64(DisasContext *s, DisasOps *o)
3353 {
3354 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3355 }
3356
3357 static void cout_subu32(DisasContext *s, DisasOps *o)
3358 {
3359 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3360 }
3361
3362 static void cout_subu64(DisasContext *s, DisasOps *o)
3363 {
3364 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3365 }
3366
3367 static void cout_subb32(DisasContext *s, DisasOps *o)
3368 {
3369 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3370 }
3371
3372 static void cout_subb64(DisasContext *s, DisasOps *o)
3373 {
3374 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3375 }
3376
3377 static void cout_tm32(DisasContext *s, DisasOps *o)
3378 {
3379 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3380 }
3381
3382 static void cout_tm64(DisasContext *s, DisasOps *o)
3383 {
3384 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3385 }
3386
3387 /* ====================================================================== */
3388 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3389 with the TCG register to which we will write. Used in combination with
3390 the "wout" generators, in some cases we need a new temporary, and in
3391 some cases we can write to a TCG global. */
3392
3393 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3394 {
3395 o->out = tcg_temp_new_i64();
3396 }
3397
3398 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3399 {
3400 o->out = tcg_temp_new_i64();
3401 o->out2 = tcg_temp_new_i64();
3402 }
3403
3404 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3405 {
3406 o->out = regs[get_field(f, r1)];
3407 o->g_out = true;
3408 }
3409
3410 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3411 {
3412 /* ??? Specification exception: r1 must be even. */
3413 int r1 = get_field(f, r1);
3414 o->out = regs[r1];
3415 o->out2 = regs[(r1 + 1) & 15];
3416 o->g_out = o->g_out2 = true;
3417 }
3418
3419 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3420 {
3421 o->out = fregs[get_field(f, r1)];
3422 o->g_out = true;
3423 }
3424
3425 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3426 {
3427 /* ??? Specification exception: r1 must be < 14. */
3428 int r1 = get_field(f, r1);
3429 o->out = fregs[r1];
3430 o->out2 = fregs[(r1 + 2) & 15];
3431 o->g_out = o->g_out2 = true;
3432 }
3433
3434 /* ====================================================================== */
3435 /* The "Write OUTput" generators. These generally perform some non-trivial
3436 copy of data to TCG globals, or to main memory. The trivial cases are
3437 generally handled by having a "prep" generator install the TCG global
3438 as the destination of the operation. */
3439
3440 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3441 {
3442 store_reg(get_field(f, r1), o->out);
3443 }
3444
3445 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3446 {
3447 int r1 = get_field(f, r1);
3448 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3449 }
3450
3451 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3452 {
3453 int r1 = get_field(f, r1);
3454 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3455 }
3456
3457 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3458 {
3459 store_reg32_i64(get_field(f, r1), o->out);
3460 }
3461
3462 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3463 {
3464 /* ??? Specification exception: r1 must be even. */
3465 int r1 = get_field(f, r1);
3466 store_reg32_i64(r1, o->out);
3467 store_reg32_i64((r1 + 1) & 15, o->out2);
3468 }
3469
3470 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3471 {
3472 /* ??? Specification exception: r1 must be even. */
3473 int r1 = get_field(f, r1);
3474 store_reg32_i64((r1 + 1) & 15, o->out);
3475 tcg_gen_shri_i64(o->out, o->out, 32);
3476 store_reg32_i64(r1, o->out);
3477 }
3478
3479 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3480 {
3481 store_freg32_i64(get_field(f, r1), o->out);
3482 }
3483
3484 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3485 {
3486 store_freg(get_field(f, r1), o->out);
3487 }
3488
3489 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3490 {
3491 /* ??? Specification exception: r1 must be < 14. */
3492 int f1 = get_field(s->fields, r1);
3493 store_freg(f1, o->out);
3494 store_freg((f1 + 2) & 15, o->out2);
3495 }
3496
3497 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3498 {
3499 if (get_field(f, r1) != get_field(f, r2)) {
3500 store_reg32_i64(get_field(f, r1), o->out);
3501 }
3502 }
3503
3504 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3505 {
3506 if (get_field(f, r1) != get_field(f, r2)) {
3507 store_freg32_i64(get_field(f, r1), o->out);
3508 }
3509 }
3510
3511 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3512 {
3513 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
3514 }
3515
3516 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3517 {
3518 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
3519 }
3520
3521 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3522 {
3523 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3524 }
3525
3526 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3527 {
3528 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3529 }
3530
3531 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3532 {
3533 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
3534 }
3535
3536 /* ====================================================================== */
3537 /* The "INput 1" generators. These load the first operand to an insn. */
3538
3539 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3540 {
3541 o->in1 = load_reg(get_field(f, r1));
3542 }
3543
3544 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3545 {
3546 o->in1 = regs[get_field(f, r1)];
3547 o->g_in1 = true;
3548 }
3549
3550 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3551 {
3552 o->in1 = tcg_temp_new_i64();
3553 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
3554 }
3555
3556 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3557 {
3558 o->in1 = tcg_temp_new_i64();
3559 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
3560 }
3561
3562 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
3563 {
3564 o->in1 = tcg_temp_new_i64();
3565 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
3566 }
3567
3568 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3569 {
3570 /* ??? Specification exception: r1 must be even. */
3571 int r1 = get_field(f, r1);
3572 o->in1 = load_reg((r1 + 1) & 15);
3573 }
3574
3575 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3576 {
3577 /* ??? Specification exception: r1 must be even. */
3578 int r1 = get_field(f, r1);
3579 o->in1 = tcg_temp_new_i64();
3580 tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
3581 }
3582
3583 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3584 {
3585 /* ??? Specification exception: r1 must be even. */
3586 int r1 = get_field(f, r1);
3587 o->in1 = tcg_temp_new_i64();
3588 tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
3589 }
3590
3591 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3592 {
3593 /* ??? Specification exception: r1 must be even. */
3594 int r1 = get_field(f, r1);
3595 o->in1 = tcg_temp_new_i64();
3596 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3597 }
3598
3599 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3600 {
3601 o->in1 = load_reg(get_field(f, r2));
3602 }
3603
3604 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3605 {
3606 o->in1 = load_reg(get_field(f, r3));
3607 }
3608
3609 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
3610 {
3611 o->in1 = regs[get_field(f, r3)];
3612 o->g_in1 = true;
3613 }
3614
3615 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3616 {
3617 o->in1 = tcg_temp_new_i64();
3618 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
3619 }
3620
3621 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3622 {
3623 o->in1 = tcg_temp_new_i64();
3624 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
3625 }
3626
3627 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3628 {
3629 o->in1 = load_freg32_i64(get_field(f, r1));
3630 }
3631
3632 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3633 {
3634 o->in1 = fregs[get_field(f, r1)];
3635 o->g_in1 = true;
3636 }
3637
3638 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3639 {
3640 /* ??? Specification exception: r1 must be < 14. */
3641 int r1 = get_field(f, r1);
3642 o->out = fregs[r1];
3643 o->out2 = fregs[(r1 + 2) & 15];
3644 o->g_out = o->g_out2 = true;
3645 }
3646
3647 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
3648 {
3649 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
3650 }
3651
3652 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
3653 {
3654 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3655 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
3656 }
3657
3658 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3659 {
3660 in1_la1(s, f, o);
3661 o->in1 = tcg_temp_new_i64();
3662 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
3663 }
3664
3665 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3666 {
3667 in1_la1(s, f, o);
3668 o->in1 = tcg_temp_new_i64();
3669 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
3670 }
3671
3672 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3673 {
3674 in1_la1(s, f, o);
3675 o->in1 = tcg_temp_new_i64();
3676 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
3677 }
3678
3679 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3680 {
3681 in1_la1(s, f, o);
3682 o->in1 = tcg_temp_new_i64();
3683 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
3684 }
3685
3686 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3687 {
3688 in1_la1(s, f, o);
3689 o->in1 = tcg_temp_new_i64();
3690 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
3691 }
3692
3693 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3694 {
3695 in1_la1(s, f, o);
3696 o->in1 = tcg_temp_new_i64();
3697 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
3698 }
3699
3700 /* ====================================================================== */
3701 /* The "INput 2" generators. These load the second operand to an insn. */
3702
3703 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3704 {
3705 o->in2 = regs[get_field(f, r1)];
3706 o->g_in2 = true;
3707 }
3708
3709 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3710 {
3711 o->in2 = tcg_temp_new_i64();
3712 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
3713 }
3714
3715 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3716 {
3717 o->in2 = tcg_temp_new_i64();
3718 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
3719 }
3720
3721 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3722 {
3723 o->in2 = load_reg(get_field(f, r2));
3724 }
3725
3726 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3727 {
3728 o->in2 = regs[get_field(f, r2)];
3729 o->g_in2 = true;
3730 }
3731
3732 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
3733 {
3734 int r2 = get_field(f, r2);
3735 if (r2 != 0) {
3736 o->in2 = load_reg(r2);
3737 }
3738 }
3739
3740 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
3741 {
3742 o->in2 = tcg_temp_new_i64();
3743 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
3744 }
3745
3746 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3747 {
3748 o->in2 = tcg_temp_new_i64();
3749 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
3750 }
3751
3752 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3753 {
3754 o->in2 = tcg_temp_new_i64();
3755 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
3756 }
3757
3758 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3759 {
3760 o->in2 = tcg_temp_new_i64();
3761 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
3762 }
3763
3764 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3765 {
3766 o->in2 = load_reg(get_field(f, r3));
3767 }
3768
3769 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3770 {
3771 o->in2 = tcg_temp_new_i64();
3772 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
3773 }
3774
3775 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3776 {
3777 o->in2 = tcg_temp_new_i64();
3778 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
3779 }
3780
3781 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
3782 {
3783 o->in2 = load_freg32_i64(get_field(f, r2));
3784 }
3785
3786 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3787 {
3788 o->in2 = fregs[get_field(f, r2)];
3789 o->g_in2 = true;
3790 }
3791
3792 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3793 {
3794 /* ??? Specification exception: r1 must be < 14. */
3795 int r2 = get_field(f, r2);
3796 o->in1 = fregs[r2];
3797 o->in2 = fregs[(r2 + 2) & 15];
3798 o->g_in1 = o->g_in2 = true;
3799 }
3800
3801 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
3802 {
3803 o->in2 = get_address(s, 0, get_field(f, r2), 0);
3804 }
3805
3806 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
3807 {
3808 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3809 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
3810 }
3811
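/* Relative-immediate operands: I2 is a signed halfword offset from the
   address of the current insn. */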
3812 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
3813 {
3814 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
3815 }
3816
3817 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
3818 {
3819 help_l2_shift(s, f, o, 31);
3820 }
3821
3822 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
3823 {
3824 help_l2_shift(s, f, o, 63);
3825 }
3826
3827 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3828 {
3829 in2_a2(s, f, o);
3830 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
3831 }
3832
3833 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3834 {
3835 in2_a2(s, f, o);
3836 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
3837 }
3838
3839 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3840 {
3841 in2_a2(s, f, o);
3842 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
3843 }
3844
3845 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3846 {
3847 in2_a2(s, f, o);
3848 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
3849 }
3850
3851 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3852 {
3853 in2_a2(s, f, o);
3854 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
3855 }
3856
3857 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
3858 {
3859 in2_a2(s, f, o);
3860 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
3861 }
3862
3863 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3864 {
3865 in2_ri2(s, f, o);
3866 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
3867 }
3868
3869 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3870 {
3871 in2_ri2(s, f, o);
3872 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
3873 }
3874
3875 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3876 {
3877 in2_ri2(s, f, o);
3878 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
3879 }
3880
3881 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
3882 {
3883 in2_ri2(s, f, o);
3884 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
3885 }
3886
3887 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
3888 {
3889 o->in2 = tcg_const_i64(get_field(f, i2));
3890 }
3891
3892 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3893 {
3894 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
3895 }
3896
3897 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3898 {
3899 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
3900 }
3901
3902 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3903 {
3904 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
3905 }
3906
3907 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3908 {
3909 uint64_t i2 = (uint16_t)get_field(f, i2);
3910 o->in2 = tcg_const_i64(i2 << s->insn->data);
3911 }
3912
3913 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3914 {
3915 uint64_t i2 = (uint32_t)get_field(f, i2);
3916 o->in2 = tcg_const_i64(i2 << s->insn->data);
3917 }
3918
3919 /* ====================================================================== */
3920
3921 /* Find opc within the table of insns. This is formulated as a switch
3922 statement so that (1) we get compile-time notice of cut-paste errors
3923 for duplicated opcodes, and (2) the compiler generates the binary
3924 search tree, rather than us having to post-process the table. */
3925
3926 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3927 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3928
3929 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3930
3931 enum DisasInsnEnum {
3932 #include "insn-data.def"
3933 };
3934
3935 #undef D
3936 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3937 .opc = OPC, \
3938 .fmt = FMT_##FT, \
3939 .fac = FAC_##FC, \
3940 .name = #NM, \
3941 .help_in1 = in1_##I1, \
3942 .help_in2 = in2_##I2, \
3943 .help_prep = prep_##P, \
3944 .help_wout = wout_##W, \
3945 .help_cout = cout_##CC, \
3946 .help_op = op_##OP, \
3947 .data = D \
3948 },
3949
3950 /* Allow 0 to be used for NULL in the table below. */
3951 #define in1_0 NULL
3952 #define in2_0 NULL
3953 #define prep_0 NULL
3954 #define wout_0 NULL
3955 #define cout_0 NULL
3956 #define op_0 NULL
3957
3958 static const DisasInsn insn_info[] = {
3959 #include "insn-data.def"
3960 };
3961
3962 #undef D
3963 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3964 case OPC: return &insn_info[insn_ ## NM];
3965
3966 static const DisasInsn *lookup_opc(uint16_t opc)
3967 {
3968 switch (opc) {
3969 #include "insn-data.def"
3970 default:
3971 return NULL;
3972 }
3973 }
3974
3975 #undef D
3976 #undef C
3977
3978 /* Extract a field from the insn. The INSN should be left-aligned in
3979 the uint64_t so that we can more easily utilize the big-bit-endian
3980 definitions we extract from the Principles of Operation. */
3981
3982 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
3983 {
3984 uint32_t r, m;
3985
3986 if (f->size == 0) {
3987 return;
3988 }
3989
3990 /* Zero extract the field from the insn. */
3991 r = (insn << f->beg) >> (64 - f->size);
3992
3993 /* Sign-extend, or un-swap the field as necessary. */
3994 switch (f->type) {
3995 case 0: /* unsigned */
3996 break;
3997 case 1: /* signed */
3998 assert(f->size <= 32);
3999 m = 1u << (f->size - 1);
4000 r = (r ^ m) - m;
4001 break;
4002 case 2: /* dl+dh split, signed 20 bit. */
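/* R currently holds DL (12 bits) followed by DH (8 bits); recombine
   them into the signed 20-bit displacement (sext(DH) << 12) | DL. */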
4003 r = ((int8_t)r << 12) | (r >> 8);
4004 break;
4005 default:
4006 abort();
4007 }
4008
4009 /* Check that the "compressed" encoding we selected above is valid,
4010 i.e. we haven't made two different original fields overlap. */
4011 assert(((o->presentC >> f->indexC) & 1) == 0);
4012 o->presentC |= 1 << f->indexC;
4013 o->presentO |= 1 << f->indexO;
4014
4015 o->c[f->indexC] = r;
4016 }
4017
4018 /* Lookup the insn at the current PC, extracting the operands into O and
4019 returning the info struct for the insn. Returns NULL for invalid insn. */
4020
4021 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4022 DisasFields *f)
4023 {
4024 uint64_t insn, pc = s->pc;
4025 int op, op2, ilen;
4026 const DisasInsn *info;
4027
4028 insn = ld_code2(env, pc);
4029 op = (insn >> 8) & 0xff;
4030 ilen = get_ilen(op);
4031 s->next_pc = s->pc + ilen;
4032
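/* Left-align the 2-, 4- or 6-byte insn in the 64-bit value, as
   extract_field expects. */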
4033 switch (ilen) {
4034 case 2:
4035 insn = insn << 48;
4036 break;
4037 case 4:
4038 insn = ld_code4(env, pc) << 32;
4039 break;
4040 case 6:
4041 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4042 break;
4043 default:
4044 abort();
4045 }
4046
4047 /* We can't actually determine the insn format until we've looked up
4048 the full insn opcode. Which we can't do without locating the
4049 secondary opcode. Assume by default that OP2 is at bit 40; for
4050 those smaller insns that don't actually have a secondary opcode
4051 this will correctly result in OP2 = 0. */
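/* Since the insn is left-aligned, (insn << B) >> (64 - S) extracts the
   S-bit field starting at instruction bit B: bits 8-15 for a one-byte
   OP2, bits 12-15 for a 4-bit OP2, and bits 40-47 by default. */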
4052 switch (op) {
4053 case 0x01: /* E */
4054 case 0x80: /* S */
4055 case 0x82: /* S */
4056 case 0x93: /* S */
4057 case 0xb2: /* S, RRF, RRE */
4058 case 0xb3: /* RRE, RRD, RRF */
4059 case 0xb9: /* RRE, RRF */
4060 case 0xe5: /* SSE, SIL */
4061 op2 = (insn << 8) >> 56;
4062 break;
4063 case 0xa5: /* RI */
4064 case 0xa7: /* RI */
4065 case 0xc0: /* RIL */
4066 case 0xc2: /* RIL */
4067 case 0xc4: /* RIL */
4068 case 0xc6: /* RIL */
4069 case 0xc8: /* SSF */
4070 case 0xcc: /* RIL */
4071 op2 = (insn << 12) >> 60;
4072 break;
4073 case 0xd0 ... 0xdf: /* SS */
4074 case 0xe1: /* SS */
4075 case 0xe2: /* SS */
4076 case 0xe8: /* SS */
4077 case 0xe9: /* SS */
4078 case 0xea: /* SS */
4079 case 0xee ... 0xf3: /* SS */
4080 case 0xf8 ... 0xfd: /* SS */
4081 op2 = 0;
4082 break;
4083 default:
4084 op2 = (insn << 40) >> 56;
4085 break;
4086 }
4087
4088 memset(f, 0, sizeof(*f));
4089 f->op = op;
4090 f->op2 = op2;
4091
4092 /* Lookup the instruction. */
4093 info = lookup_opc(op << 8 | op2);
4094
4095 /* If we found it, extract the operands. */
4096 if (info != NULL) {
4097 DisasFormat fmt = info->fmt;
4098 int i;
4099
4100 for (i = 0; i < NUM_C_FIELD; ++i) {
4101 extract_field(f, &format_info[fmt].op[i], insn);
4102 }
4103 }
4104 return info;
4105 }
4106
4107 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4108 {
4109 const DisasInsn *insn;
4110 ExitStatus ret = NO_EXIT;
4111 DisasFields f;
4112 DisasOps o;
4113
4114 insn = extract_insn(env, s, &f);
4115
4116 /* If not found, try the old interpreter. This includes ILLOPC. */
4117 if (insn == NULL) {
4118 disas_s390_insn(env, s);
4119 switch (s->is_jmp) {
4120 case DISAS_NEXT:
4121 ret = NO_EXIT;
4122 break;
4123 case DISAS_TB_JUMP:
4124 ret = EXIT_GOTO_TB;
4125 break;
4126 case DISAS_JUMP:
4127 ret = EXIT_PC_UPDATED;
4128 break;
4129 case DISAS_EXCP:
4130 ret = EXIT_NORETURN;
4131 break;
4132 default:
4133 abort();
4134 }
4135
4136 s->pc = s->next_pc;
4137 return ret;
4138 }
4139
4140 /* Set up the structures we use to communicate with the helpers. */
4141 s->insn = insn;
4142 s->fields = &f;
4143 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4144 TCGV_UNUSED_I64(o.out);
4145 TCGV_UNUSED_I64(o.out2);
4146 TCGV_UNUSED_I64(o.in1);
4147 TCGV_UNUSED_I64(o.in2);
4148 TCGV_UNUSED_I64(o.addr1);
4149
4150 /* Implement the instruction. */
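/* Each helper hook is optional: in1/in2 load the input operands,
   prep prepares the output location, op performs the operation proper,
   wout writes back the result, and cout sets the condition code. */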
4151 if (insn->help_in1) {
4152 insn->help_in1(s, &f, &o);
4153 }
4154 if (insn->help_in2) {
4155 insn->help_in2(s, &f, &o);
4156 }
4157 if (insn->help_prep) {
4158 insn->help_prep(s, &f, &o);
4159 }
4160 if (insn->help_op) {
4161 ret = insn->help_op(s, &o);
4162 }
4163 if (insn->help_wout) {
4164 insn->help_wout(s, &f, &o);
4165 }
4166 if (insn->help_cout) {
4167 insn->help_cout(s, &o);
4168 }
4169
4170 /* Free any temporaries created by the helpers. */
4171 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4172 tcg_temp_free_i64(o.out);
4173 }
4174 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4175 tcg_temp_free_i64(o.out2);
4176 }
4177 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4178 tcg_temp_free_i64(o.in1);
4179 }
4180 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4181 tcg_temp_free_i64(o.in2);
4182 }
4183 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4184 tcg_temp_free_i64(o.addr1);
4185 }
4186
4187 /* Advance to the next instruction. */
4188 s->pc = s->next_pc;
4189 return ret;
4190 }
4191
4192 static inline void gen_intermediate_code_internal(CPUS390XState *env,
4193 TranslationBlock *tb,
4194 int search_pc)
4195 {
4196 DisasContext dc;
4197 target_ulong pc_start;
4198 uint64_t next_page_start;
4199 uint16_t *gen_opc_end;
4200 int j, lj = -1;
4201 int num_insns, max_insns;
4202 CPUBreakpoint *bp;
4203 ExitStatus status;
4204 bool do_debug;
4205
4206 pc_start = tb->pc;
4207
4208 /* 31-bit mode */
4209 if (!(tb->flags & FLAG_MASK_64)) {
4210 pc_start &= 0x7fffffff;
4211 }
4212
4213 dc.tb = tb;
4214 dc.pc = pc_start;
4215 dc.cc_op = CC_OP_DYNAMIC;
4216 do_debug = dc.singlestep_enabled = env->singlestep_enabled;
4217 dc.is_jmp = DISAS_NEXT;
4218
4219 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4220
4221 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4222
4223 num_insns = 0;
4224 max_insns = tb->cflags & CF_COUNT_MASK;
4225 if (max_insns == 0) {
4226 max_insns = CF_COUNT_MASK;
4227 }
4228
4229 gen_icount_start();
4230
4231 do {
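/* When search_pc is set, record the per-insn PC, cc_op and icount so
   that restore_state_to_opc below can map a TCG op index back to
   guest state. */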
4232 if (search_pc) {
4233 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4234 if (lj < j) {
4235 lj++;
4236 while (lj < j) {
4237 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4238 }
4239 }
4240 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4241 gen_opc_cc_op[lj] = dc.cc_op;
4242 tcg_ctx.gen_opc_instr_start[lj] = 1;
4243 tcg_ctx.gen_opc_icount[lj] = num_insns;
4244 }
4245 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4246 gen_io_start();
4247 }
4248
4249 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4250 tcg_gen_debug_insn_start(dc.pc);
4251 }
4252
4253 status = NO_EXIT;
4254 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
4255 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4256 if (bp->pc == dc.pc) {
4257 status = EXIT_PC_STALE;
4258 do_debug = true;
4259 break;
4260 }
4261 }
4262 }
4263 if (status == NO_EXIT) {
4264 status = translate_one(env, &dc);
4265 }
4266
4267 /* If we reach a page boundary, are single stepping,
4268 or exhaust instruction count, stop generation. */
4269 if (status == NO_EXIT
4270 && (dc.pc >= next_page_start
4271 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4272 || num_insns >= max_insns
4273 || singlestep
4274 || env->singlestep_enabled)) {
4275 status = EXIT_PC_STALE;
4276 }
4277 } while (status == NO_EXIT);
4278
4279 if (tb->cflags & CF_LAST_IO) {
4280 gen_io_end();
4281 }
4282
4283 switch (status) {
4284 case EXIT_GOTO_TB:
4285 case EXIT_NORETURN:
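/* The insn already ended the TB (goto_tb/exit_tb) or raised an
   exception; nothing further to emit. */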
4286 break;
4287 case EXIT_PC_STALE:
4288 update_psw_addr(&dc);
4289 /* FALLTHRU */
4290 case EXIT_PC_UPDATED:
4291 if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
4292 gen_op_calc_cc(&dc);
4293 } else {
4294 /* Next TB starts off with CC_OP_DYNAMIC,
4295 so make sure the cc op type is in env */
4296 gen_op_set_cc_op(&dc);
4297 }
4298 if (do_debug) {
4299 gen_exception(EXCP_DEBUG);
4300 } else {
4301 /* Generate the return instruction */
4302 tcg_gen_exit_tb(0);
4303 }
4304 break;
4305 default:
4306 abort();
4307 }
4308
4309 gen_icount_end(tb, num_insns);
4310 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4311 if (search_pc) {
4312 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4313 lj++;
4314 while (lj <= j) {
4315 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4316 }
4317 } else {
4318 tb->size = dc.pc - pc_start;
4319 tb->icount = num_insns;
4320 }
4321
4322 #if defined(S390X_DEBUG_DISAS)
4323 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4324 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4325 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4326 qemu_log("\n");
4327 }
4328 #endif
4329 }
4330
4331 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4332 {
4333 gen_intermediate_code_internal(env, tb, 0);
4334 }
4335
4336 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4337 {
4338 gen_intermediate_code_internal(env, tb, 1);
4339 }
4340
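/* Recover psw.addr and, unless it was dynamic or static, cc_op for the
   insn at index PC_POS, from the per-insn data recorded by the
   search_pc pass above. */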
4341 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4342 {
4343 int cc_op;
4344 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4345 cc_op = gen_opc_cc_op[pc_pos];
4346 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4347 env->cc_op = cc_op;
4348 }
4349 }