/*
 * target-s390x/translate.c (QEMU) — "target-s390: Convert IPTE" snapshot.
 * (gitweb navigation text removed from this scraped copy.)
 */
1 /*
2 * S/390 translation
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
30
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
39
40 #include "exec/gen-icount.h"
41 #include "helper.h"
42 #define GEN_HELPER 1
43 #include "helper.h"
44
45
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/* Per-translation-block disassembly state. */
struct DisasContext {
    struct TranslationBlock *tb;     /* TB being translated */
    const DisasInsn *insn;           /* current decode-table entry */
    DisasFields *fields;             /* decoded operand fields */
    uint64_t pc, next_pc;            /* current insn address and its end */
    enum cc_op cc_op;                /* how the condition code is pending */
    bool singlestep_enabled;
    int is_jmp;                      /* DISAS_* termination reason */
};
60
/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;          /* compare the s64 pair, else the s32 pair */
    bool g1;             /* a operand is a global TCG value: do not free */
    bool g2;             /* b operand is a global TCG value: do not free */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

/* Extra is_jmp value: TB ended by raising an exception. */
#define DISAS_EXCP 4
74
75 static void gen_op_calc_cc(DisasContext *s);
76
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
80 #endif
81
82 static inline void debug_insn(uint64_t insn)
83 {
84 LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
85 }
86
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 {
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
92 }
93 }
94 return pc;
95 }
96
97 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 int flags)
99 {
100 int i;
101
102 if (env->cc_op > 3) {
103 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105 } else {
106 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107 env->psw.mask, env->psw.addr, env->cc_op);
108 }
109
110 for (i = 0; i < 16; i++) {
111 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112 if ((i % 4) == 3) {
113 cpu_fprintf(f, "\n");
114 } else {
115 cpu_fprintf(f, " ");
116 }
117 }
118
119 for (i = 0; i < 16; i++) {
120 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121 if ((i % 4) == 3) {
122 cpu_fprintf(f, "\n");
123 } else {
124 cpu_fprintf(f, " ");
125 }
126 }
127
128 #ifndef CONFIG_USER_ONLY
129 for (i = 0; i < 16; i++) {
130 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131 if ((i % 4) == 3) {
132 cpu_fprintf(f, "\n");
133 } else {
134 cpu_fprintf(f, " ");
135 }
136 }
137 #endif
138
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i = 0; i < CC_OP_MAX; i++) {
141 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
142 inline_branch_miss[i], inline_branch_hit[i]);
143 }
144 #endif
145
146 cpu_fprintf(f, "\n");
147 }
148
149 static TCGv_i64 psw_addr;
150 static TCGv_i64 psw_mask;
151
152 static TCGv_i32 cc_op;
153 static TCGv_i64 cc_src;
154 static TCGv_i64 cc_dst;
155 static TCGv_i64 cc_vr;
156
157 static char cpu_reg_names[32][4];
158 static TCGv_i64 regs[16];
159 static TCGv_i64 fregs[16];
160
161 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162
/* One-time TCG front-end initialization: create the global TCG values
   backing the architected state (PSW, cc tracking fields, GPRs, FPRs)
   and register the helper functions. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    /* Condition-code computation inputs (see gen_op_calc_cc). */
    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* 16 general registers; names r0..r15 live in cpu_reg_names[0..15]. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* 16 floating point registers; names f0..f15 in cpu_reg_names[16..31]. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
202
203 static inline TCGv_i64 load_reg(int reg)
204 {
205 TCGv_i64 r = tcg_temp_new_i64();
206 tcg_gen_mov_i64(r, regs[reg]);
207 return r;
208 }
209
210 static inline TCGv_i64 load_freg(int reg)
211 {
212 TCGv_i64 r = tcg_temp_new_i64();
213 tcg_gen_mov_i64(r, fregs[reg]);
214 return r;
215 }
216
/* Return a fresh i32 temporary holding the short-float (high 32 bits)
   part of FPR REG.  Caller frees the temporary. */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    /* On 32-bit hosts an i64 is a register pair; take the high half. */
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    /* Reinterpret the i32 temp as an i64 destination for the shift —
       relies on i32/i64 temps sharing index space on 64-bit hosts. */
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}
227
228 static inline TCGv_i64 load_freg32_i64(int reg)
229 {
230 TCGv_i64 r = tcg_temp_new_i64();
231 tcg_gen_shri_i64(r, fregs[reg], 32);
232 return r;
233 }
234
235 static inline TCGv_i32 load_reg32(int reg)
236 {
237 TCGv_i32 r = tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r, regs[reg]);
239 return r;
240 }
241
242 static inline TCGv_i64 load_reg32_i64(int reg)
243 {
244 TCGv_i64 r = tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r, regs[reg]);
246 return r;
247 }
248
249 static inline void store_reg(int reg, TCGv_i64 v)
250 {
251 tcg_gen_mov_i64(regs[reg], v);
252 }
253
254 static inline void store_freg(int reg, TCGv_i64 v)
255 {
256 tcg_gen_mov_i64(fregs[reg], v);
257 }
258
/* Write the i32 value V into the low half of GPR REG, preserving the
   upper 32 bits (standard 32-bit-operand GPR semantics). */
static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    /* Reinterpret the i32 temp as an i64 source for the deposit —
       relies on i32/i64 temps sharing index space on 64-bit hosts. */
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}
269
270 static inline void store_reg32_i64(int reg, TCGv_i64 v)
271 {
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
274 }
275
276 static inline void store_reg32h_i64(int reg, TCGv_i64 v)
277 {
278 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
279 }
280
/* Write the i32 value V into the short-float (high) half of FPR REG,
   preserving the low 32 bits. */
static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    /* Reinterpret the i32 temp as an i64 source for the deposit —
       relies on i32/i64 temps sharing index space on 64-bit hosts. */
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}
291
292 static inline void store_freg32_i64(int reg, TCGv_i64 v)
293 {
294 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
295 }
296
297 static inline void return_low128(TCGv_i64 dest)
298 {
299 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
300 }
301
302 static inline void update_psw_addr(DisasContext *s)
303 {
304 /* psw.addr */
305 tcg_gen_movi_i64(psw_addr, s->pc);
306 }
307
/* Called before emitting an operation that may fault in system mode:
   flush pc and the condition code to env so the exception path sees
   consistent state.  User-mode faults are handled without this. */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}
315
316 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
317 {
318 return (uint64_t)cpu_lduw_code(env, pc);
319 }
320
321 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
322 {
323 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
324 }
325
326 static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
327 {
328 return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
329 }
330
331 static inline int get_mem_index(DisasContext *s)
332 {
333 switch (s->tb->flags & FLAG_MASK_ASC) {
334 case PSW_ASC_PRIMARY >> 32:
335 return 0;
336 case PSW_ASC_SECONDARY >> 32:
337 return 1;
338 case PSW_ASC_HOME >> 32:
339 return 2;
340 default:
341 tcg_abort();
342 break;
343 }
344 }
345
346 static void gen_exception(int excp)
347 {
348 TCGv_i32 tmp = tcg_const_i32(excp);
349 gen_helper_exception(cpu_env, tmp);
350 tcg_temp_free_i32(tmp);
351 }
352
/* Raise a program exception with interruption code CODE: record the
   code and instruction length in env, advance the PSW past the failing
   instruction, materialize the cc, and end the TB via EXCP_PGM. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* Instruction length code, needed to back up the PSW on delivery. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    gen_op_calc_cc(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);

    /* End TB here. */
    s->is_jmp = DISAS_EXCP;
}
379
380 static inline void gen_illegal_opcode(DisasContext *s)
381 {
382 gen_program_exception(s, PGM_SPECIFICATION);
383 }
384
385 static inline void check_privileged(DisasContext *s)
386 {
387 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
388 gen_program_exception(s, PGM_PRIVILEGED);
389 }
390 }
391
/* Compute the effective address x2 + b2 + d2 into a new i64 temporary
   (caller frees it), truncating to 31 bits when the PSW is not in
   64-bit addressing mode.  x2 == 0 / b2 == 0 mean "no register". */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        /* Neither index nor base: the (already masked) displacement. */
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}
429
430 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
431 {
432 s->cc_op = CC_OP_CONST0 + val;
433 }
434
435 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
436 {
437 tcg_gen_discard_i64(cc_src);
438 tcg_gen_mov_i64(cc_dst, dst);
439 tcg_gen_discard_i64(cc_vr);
440 s->cc_op = op;
441 }
442
443 static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
444 {
445 tcg_gen_discard_i64(cc_src);
446 tcg_gen_extu_i32_i64(cc_dst, dst);
447 tcg_gen_discard_i64(cc_vr);
448 s->cc_op = op;
449 }
450
451 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
452 TCGv_i64 dst)
453 {
454 tcg_gen_mov_i64(cc_src, src);
455 tcg_gen_mov_i64(cc_dst, dst);
456 tcg_gen_discard_i64(cc_vr);
457 s->cc_op = op;
458 }
459
460 static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
461 TCGv_i32 dst)
462 {
463 tcg_gen_extu_i32_i64(cc_src, src);
464 tcg_gen_extu_i32_i64(cc_dst, dst);
465 tcg_gen_discard_i64(cc_vr);
466 s->cc_op = op;
467 }
468
469 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
470 TCGv_i64 dst, TCGv_i64 vr)
471 {
472 tcg_gen_mov_i64(cc_src, src);
473 tcg_gen_mov_i64(cc_dst, dst);
474 tcg_gen_mov_i64(cc_vr, vr);
475 s->cc_op = op;
476 }
477
478 static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
479 {
480 gen_op_update1_cc_i32(s, CC_OP_NZ, val);
481 }
482
483 static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
484 {
485 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
486 }
487
488 static inline void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
489 {
490 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
491 }
492
493 static inline void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
494 {
495 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
496 }
497
498 static inline void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
499 {
500 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
501 }
502
503 static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
504 enum cc_op cond)
505 {
506 gen_op_update2_cc_i32(s, cond, v1, v2);
507 }
508
509 static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
510 enum cc_op cond)
511 {
512 gen_op_update2_cc_i64(s, cond, v1, v2);
513 }
514
515 static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
516 {
517 cmp_32(s, v1, v2, CC_OP_LTGT_32);
518 }
519
520 static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
521 {
522 cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
523 }
524
525 static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
526 {
527 /* XXX optimize for the constant? put it in s? */
528 TCGv_i32 tmp = tcg_const_i32(v2);
529 cmp_32(s, v1, tmp, CC_OP_LTGT_32);
530 tcg_temp_free_i32(tmp);
531 }
532
533 static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
534 {
535 TCGv_i32 tmp = tcg_const_i32(v2);
536 cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
537 tcg_temp_free_i32(tmp);
538 }
539
540 static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
541 {
542 cmp_64(s, v1, v2, CC_OP_LTGT_64);
543 }
544
545 static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
546 {
547 cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
548 }
549
550 static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
551 {
552 TCGv_i64 tmp = tcg_const_i64(v2);
553 cmp_s64(s, v1, tmp);
554 tcg_temp_free_i64(tmp);
555 }
556
557 static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
558 {
559 TCGv_i64 tmp = tcg_const_i64(v2);
560 cmp_u64(s, v1, tmp);
561 tcg_temp_free_i64(tmp);
562 }
563
564 static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
565 {
566 gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
567 }
568
569 static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
570 {
571 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
572 }
573
574 /* CC value is in env->cc_op */
575 static inline void set_cc_static(DisasContext *s)
576 {
577 tcg_gen_discard_i64(cc_src);
578 tcg_gen_discard_i64(cc_dst);
579 tcg_gen_discard_i64(cc_vr);
580 s->cc_op = CC_OP_STATIC;
581 }
582
583 static inline void gen_op_set_cc_op(DisasContext *s)
584 {
585 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
586 tcg_gen_movi_i32(cc_op, s->cc_op);
587 }
588 }
589
590 static inline void gen_update_cc_op(DisasContext *s)
591 {
592 gen_op_set_cc_op(s);
593 }
594
/* Materialize the deferred condition code: compute the 0..3 cc value
   into the cc_op global (via the calc_cc helper where needed) and mark
   the translator state CC_OP_STATIC.  The helper call passes only the
   operands the current CC_OP actually consumes; unused slots get a
   dummy zero. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
669
670 static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
671 {
672 debug_insn(insn);
673
674 *r1 = (insn >> 4) & 0xf;
675 *r2 = insn & 0xf;
676 }
677
678 static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
679 int *x2, int *b2, int *d2)
680 {
681 debug_insn(insn);
682
683 *r1 = (insn >> 20) & 0xf;
684 *x2 = (insn >> 16) & 0xf;
685 *b2 = (insn >> 12) & 0xf;
686 *d2 = insn & 0xfff;
687
688 return get_address(s, *x2, *b2, *d2);
689 }
690
691 static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
692 int *b2, int *d2)
693 {
694 debug_insn(insn);
695
696 *r1 = (insn >> 20) & 0xf;
697 /* aka m3 */
698 *r3 = (insn >> 16) & 0xf;
699 *b2 = (insn >> 12) & 0xf;
700 *d2 = insn & 0xfff;
701 }
702
703 static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
704 int *b1, int *d1)
705 {
706 debug_insn(insn);
707
708 *i2 = (insn >> 16) & 0xff;
709 *b1 = (insn >> 12) & 0xf;
710 *d1 = insn & 0xfff;
711
712 return get_address(s, 0, *b1, *d1);
713 }
714
715 static int use_goto_tb(DisasContext *s, uint64_t dest)
716 {
717 /* NOTE: we handle the case where the TB spans two pages here */
718 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
719 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
720 && !s->singlestep_enabled
721 && !(s->tb->cflags & CF_LAST_IO));
722 }
723
/* End the TB with a branch to PC, using a patchable direct jump
   (goto_tb) when allowed, otherwise an indirect exit. */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    /* Flush any deferred cc_op value before leaving the TB. */
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        /* Return (tb | tb_num) so the exit identifies the jump slot. */
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}
738
739 static inline void account_noninline_branch(DisasContext *s, int cc_op)
740 {
741 #ifdef DEBUG_INLINE_BRANCHES
742 inline_branch_miss[cc_op]++;
743 #endif
744 }
745
746 static inline void account_inline_branch(DisasContext *s, int cc_op)
747 {
748 #ifdef DEBUG_INLINE_BRANCHES
749 inline_branch_hit[cc_op]++;
750 #endif
751 }
752
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.
   Index: the 4-bit branch mask (8=EQ, 4=LT, 2=GT, 1=overflow bit). */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
766
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible, so bits 2/1 of the
   mask are don't-cares. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
779
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Two phases: first
   pick a TCGCond for (cc_op, mask), falling back to materializing the
   cc (do_dynamic) when no single condition matches; second load the
   comparison operands, flagging globals (g1/g2) that must not be
   freed by free_compare(). */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial always/never masks need no operands at all. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: cc 0 = all selected bits zero. */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        /* INSERT CHARACTERS UNDER MASK: cc 0 = all inserted bytes zero. */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (value & mask) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        /* Compare the materialized cc (0..3) against a constant or
           parity, one case per branch mask. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
1002
1003 static void free_compare(DisasCompare *c)
1004 {
1005 if (!c->g1) {
1006 if (c->is_64) {
1007 tcg_temp_free_i64(c->u.s64.a);
1008 } else {
1009 tcg_temp_free_i32(c->u.s32.a);
1010 }
1011 }
1012 if (!c->g2) {
1013 if (c->is_64) {
1014 tcg_temp_free_i64(c->u.s64.b);
1015 } else {
1016 tcg_temp_free_i32(c->u.s32.b);
1017 }
1018 }
1019 }
1020
/* Translate the legacy 0xb2-prefixed opcodes not yet converted to the
   table-driven decoder.  All remaining cases are system-mode-only
   (privileged or channel-subsystem) instructions; in a user-only
   build everything falls through to an illegal-opcode exception. */
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
                     uint32_t insn)
{
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2;
    int r1, r2;
    int r3, d2, b2;

    r1 = (insn >> 4) & 0xf;
    r2 = insn & 0xf;

    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);

    switch (op) {
    case 0x29: /* ISKE R1,R2 [RRE] */
        /* Insert Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r2);
        tmp2 = tcg_temp_new_i64();
        gen_helper_iske(tmp2, cpu_env, tmp);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Reset Reference Bit Extended; sets cc from the old R/C bits. */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x2b: /* SSKE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_sske(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x34: /* STCH ? */
        /* Store Subchannel: no channel subsystem emulated, cc 3 = not
           operational. */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call: not emulated, cc 3 = not operational. */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x78: /* STCKE D2(B2) [S] */
        /* Store Clock Extended */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x79: /* SACF D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->pc = s->next_pc;
        update_psw_addr(s);
        s->is_jmp = DISAS_JUMP;
        break;
    case 0x7d: /* STSI D2,(B2) [S] */
        /* Store System Information; function selected by r0/r1. */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0xb1: /* STFL D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0xb2: /* LPSWE D2(B2) [S] */
        /* Load PSW Extended: 16-byte PSW (mask, addr) from memory. */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        break;
    case 0x20: /* SERVC R1,R2 [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        tmp = load_reg(r1);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    default:
#endif
        /* User-only builds take this path for every opcode. */
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
#ifndef CONFIG_USER_ONLY
        break;
    }
#endif
}
1186
/* Legacy top-level decoder for instructions not yet in the new
   table-driven decoder: dispatch on the first opcode byte.  Currently
   only the 0xb2 family remains; everything else raises an
   illegal-opcode program exception. */
static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
{
    unsigned char opc;
    uint64_t insn;
    int op;

    opc = cpu_ldub_code(env, s->pc);
    LOG_DISAS("opc 0x%x\n", opc);

    switch (opc) {
    case 0xb2:
        insn = ld_code4(env, s->pc);
        /* Secondary opcode is bits 23..16 of the 4-byte instruction. */
        op = (insn >> 16) & 0xff;
        disas_b2(env, s, op, insn);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
        gen_illegal_opcode(s);
        break;
    }
}
1208
/* ====================================================================== */
/* Define the insn format enumeration.  Each FMT_* constant is generated
   from insn-format.def; here only the format name N is kept and the
   per-field arguments X1..X5 are discarded.  */
#define F0(N) FMT_##N,
#define F1(N, X1) F0(N)
#define F2(N, X1, X2) F0(N)
#define F3(N, X1, X2, X3) F0(N)
#define F4(N, X1, X2, X3, X4) F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
1228
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* "Original" field indices: one distinct value per field name, used as
   bit positions in the presentO availability bitmap below.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* "Compact" field indices: fields that never coexist in a single insn
   format share one storage slot in DisasFields.c[].  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

/* The decoded fields of one instruction.  */
struct DisasFields {
    unsigned op:8;          /* opcode byte */
    unsigned op2:8;         /* extended opcode byte, when the format has one */
    unsigned presentC:16;   /* bitmap of occupied c[] slots (compact index) */
    unsigned int presentO;  /* bitmap over enum DisasFieldIndexO */
    int c[NUM_C_FIELD];     /* field values, indexed by DisasFieldIndexC */
};
1296
/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F) have_field1((S), FLD_O_##F)
#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)

/* Return true if field C (original index) was decoded for this insn.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch the value of a decoded field; O is used only to assert the
   field is actually present, C selects the compact storage slot.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1312
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;       /* first bit of the field within the insn */
    unsigned int size:8;      /* width of the field in bits */
    unsigned int type:2;      /* extraction type: 0 plain, 1 signed immediate,
                                 2 long displacement (see macros below) */
    unsigned int indexC:6;    /* compact storage slot (DisasFieldIndexC) */
    enum DisasFieldIndexO indexO:8;  /* original index (DisasFieldIndexO) */
} DisasField;

/* Per-format list of fields; unused trailing entries are zero.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
               { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
               { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }

/* Expand insn-format.def a second time, now into the field-layout
   table indexed by DisasFormat.  */
#define F0(N) { { } },
#define F1(N, X1) { { X1 } },
#define F2(N, X1, X2) { { X1, X2 } },
#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1366
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    /* The g_* flags presumably mark operands that alias TCG globals and
       must not be clobbered or freed (cf. the assert in op_andi) —
       confirm against the in/wout helpers.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* Architectural facility (feature) an instruction belongs to.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;

/* Static decode-table entry describing one instruction.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode (primary byte, plus extension) */
    DisasFormat fmt:6;      /* field layout, indexes format_info[] */
    DisasFacility fac:6;    /* required facility */

    const char *name;

    /* Optional translation phases: load inputs, prepare outputs, write
       outputs back, compute the CC, and the operation itself.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* per-insn constant passed via s->insn->data */
};
1433
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */
1436
1437 static void help_l2_shift(DisasContext *s, DisasFields *f,
1438 DisasOps *o, int mask)
1439 {
1440 int b2 = get_field(f, b2);
1441 int d2 = get_field(f, d2);
1442
1443 if (b2 == 0) {
1444 o->in2 = tcg_const_i64(d2 & mask);
1445 } else {
1446 o->in2 = get_address(s, 0, b2, d2);
1447 tcg_gen_andi_i64(o->in2, o->in2, mask);
1448 }
1449 }
1450
/* Emit an unconditional branch to DEST.  Chains with goto_tb when the
   target is reachable; otherwise just updates psw_addr.  A branch to
   the sequentially-next insn is a no-op.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        /* Flush the lazily-tracked cc_op before leaving the TB.  */
        gen_update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}
1467
/* Emit a conditional branch.  The target is either IMM halfwords from
   s->pc (when IS_IMM) or the computed address CDEST.  C describes the
   condition and is consumed (freed) before returning.  Depending on
   what use_goto_tb permits, this emits a two-way goto_tb, a one-way
   goto_tb plus dynamic exit, or a movcond-selected PSW update.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;    /* IMM counts halfwords.  */
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so movcond_i64 can
               select between the two 64-bit destinations.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1592
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

/* Load Positive (64-bit integer): out = |in2|.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    gen_helper_abs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Load Positive, short BFP: clear bit 31, the float32 sign bit.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Load Positive, long BFP: clear bit 63, the float64 sign bit.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* Load Positive, extended BFP: clear the sign bit in the high
   doubleword, pass the low doubleword through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* Add: out = in1 + in2.  CC, when needed, is set by the cout phase.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1627
/* Add Logical with Carry: out = in1 + in2 + carry-in.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* For logical adds, CC values 2 and 3 indicate a carry-out, so
       cc >> 1 extracts the carry bit to add into the sum.  */
    tcg_gen_shri_i64(cc, cc, 1);

    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
1644
/* Add, short BFP: helper handles rounding and FP exceptions.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Add, long BFP.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Add, extended BFP: 128-bit operands are passed as doubleword pairs;
   the low half of the result comes back via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* And: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
1669
/* And Immediate (NIHH et al.): and the immediate into one 16/32-bit
   slice of the register, selected by shift/size packed in insn->data.
   Bits outside the slice are preserved (the immediate is padded with
   ones), and the CC reflects only the affected slice.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is clobbered below, so it must not alias a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1686
/* Branch and Save (BAS/BASR): save link info in R1, then branch to the
   address in in2 — unless in2 is unused (BASR with R2=0), in which case
   only the link is saved.  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* Branch Relative and Save: save link info, branch I2 halfwords
   relative to the current PC.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* Branch on Condition (BC/BCR/BRC): branch per mask M1, to either a
   relative immediate target or the address in in2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1714
/* Branch on Count, 32-bit: decrement the low half of R1 and branch if
   the 32-bit result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    /* Compare the truncated 32-bit result against zero.  */
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch on Count, 64-bit: decrement R1 in place and branch if it is
   non-zero.  regs[r1] is a global, hence g1 = true.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1757
/* Compare, short BFP: CC set by the helper.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare, long BFP.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare, extended BFP: both 128-bit operands as doubleword pairs.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
1778
/* Convert BFP to 32-bit fixed (CFEB): M3 supplies the rounding mode;
   CC reflects the source value.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert long BFP to 32-bit fixed (CFDB).  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert extended BFP to 32-bit fixed (CFXB).  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* Convert short BFP to 64-bit fixed (CGEB).  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* Convert long BFP to 64-bit fixed (CGDB).  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* Convert extended BFP to 64-bit fixed (CGXB).  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
1832
/* Convert 64-bit fixed to short BFP (CEGB); M3 is the rounding mode.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit fixed to long BFP (CDGB).  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Convert 64-bit fixed to extended BFP (CXGB); 128-bit result comes
   back as a doubleword pair.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
1857
/* Checksum (CKSM).  The helper does the accumulation and returns the
   number of bytes consumed, which we use to advance the R2 (address) /
   R2+1 (length) register pair; CC is set by the helper.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
1874
/* Compare Logical (CLC): unsigned storage-to-storage comparison of
   L+1 bytes.  The common power-of-two lengths are inlined as two loads
   feeding an unsigned compare; all other lengths go to the helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: byte-wise comparison in the helper, which
           also sets the CC itself.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
1908
/* Compare Logical Long Extended (CLCLE): done entirely in the helper,
   which updates the R1/R1+1 and R3/R3+1 register pairs and the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare Logical under Mask (CLM): compare the bytes of R1 selected
   by M3 against successive bytes at the second-operand address.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* Compare Logical String (CLST): helper compares the strings at in1
   and in2 (regs[0] supplies the terminator per the helper's contract);
   updated addresses are returned in in1 and, via low128, in2.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1942
/* Compare and Swap, 32-bit (CS): helper performs the atomic compare
   of R1 with memory and conditional store of R3; CC from helper.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare and Swap, 64-bit (CSG).  */
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare Double and Swap (CDS): glue the even/odd R3 pair into one
   64-bit value (R3 high, R3+1 low) and reuse the 64-bit CS helper.  */
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i64 in3 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
    potential_page_fault(s);
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
    tcg_temp_free_i64(in3);
    set_cc_static(s);
    return NO_EXIT;
}

/* Compare Double and Swap, 128-bit (CDSG): done in the helper, which
   reads/writes the R1 and R3 register pairs directly.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    /* XXX rewrite in tcg */
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
    set_cc_static(s);
    return NO_EXIT;
}
1983
/* Convert to Decimal (CVD): convert the 32-bit value of R1 to packed
   decimal in the helper and store the 8-byte result at the second
   operand address.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
1995
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE: hypervisor/service call.  Privileged.  The function code
   is taken from the low 12 bits of the D2 field.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
#endif
2011
/* Divide Single, 32-bit signed: quotient via low128, remainder in out2.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Divide, 32-bit unsigned.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Divide Single, 64-bit signed.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Divide, 64-bit unsigned: the 128-bit dividend is passed in as the
   out/out2 doubleword pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Divide, short BFP.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Divide, long BFP.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Divide, extended BFP: 128-bit operands as doubleword pairs.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2058
/* Extract Access Register (EAR): copy access register R2 into out.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* Extract FPC (EFPC): load the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
2071
/* EXECUTE (EX): run the single target insn at the in2 address, possibly
   modified by in1.  Implemented wholly in the helper, which needs an
   up-to-date PSW address and CC on entry.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    /* The helper is told the address of the following insn.  */
    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    set_cc_static(s);
    return NO_EXIT;
}
2097
/* Find Leftmost One (FLOGR): R1 = leading-zero count of in2 (64 when
   in2 is zero), R1+1 = in2 with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2117
2118 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2119 {
2120 int m3 = get_field(s->fields, m3);
2121 int pos, len, base = s->insn->data;
2122 TCGv_i64 tmp = tcg_temp_new_i64();
2123 uint64_t ccm;
2124
2125 switch (m3) {
2126 case 0xf:
2127 /* Effectively a 32-bit load. */
2128 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2129 len = 32;
2130 goto one_insert;
2131
2132 case 0xc:
2133 case 0x6:
2134 case 0x3:
2135 /* Effectively a 16-bit load. */
2136 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2137 len = 16;
2138 goto one_insert;
2139
2140 case 0x8:
2141 case 0x4:
2142 case 0x2:
2143 case 0x1:
2144 /* Effectively an 8-bit load. */
2145 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2146 len = 8;
2147 goto one_insert;
2148
2149 one_insert:
2150 pos = base + ctz32(m3) * 8;
2151 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2152 ccm = ((1ull << len) - 1) << pos;
2153 break;
2154
2155 default:
2156 /* This is going to be a sequence of loads and inserts. */
2157 pos = base + 32 - 8;
2158 ccm = 0;
2159 while (m3) {
2160 if (m3 & 0x8) {
2161 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2162 tcg_gen_addi_i64(o->in2, o->in2, 1);
2163 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2164 ccm |= 0xff << pos;
2165 }
2166 m3 = (m3 << 1) & 0xf;
2167 pos -= 8;
2168 }
2169 break;
2170 }
2171
2172 tcg_gen_movi_i64(tmp, ccm);
2173 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2174 tcg_temp_free_i64(tmp);
2175 return NO_EXIT;
2176 }
2177
/* Insert Immediate (IIHH et al.): deposit in2 into the shift/size
   slice of in1, both packed in insn->data.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
2185
2186 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2187 {
2188 TCGv_i64 t1;
2189
2190 gen_op_calc_cc(s);
2191 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2192
2193 t1 = tcg_temp_new_i64();
2194 tcg_gen_shli_i64(t1, psw_mask, 20);
2195 tcg_gen_shri_i64(t1, t1, 36);
2196 tcg_gen_or_i64(o->out, o->out, t1);
2197
2198 tcg_gen_extu_i32_i64(t1, cc_op);
2199 tcg_gen_shli_i64(t1, t1, 28);
2200 tcg_gen_or_i64(o->out, o->out, t1);
2201 tcg_temp_free_i64(t1);
2202 return NO_EXIT;
2203 }
2204
#ifndef CONFIG_USER_ONLY
/* Invalidate Page Table Entry (IPTE): privileged; the invalidation and
   TLB flush are done in the helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
#endif
2213
/* Load Lengthened, short BFP to long BFP (LDEB).  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Load Rounded, long BFP to short BFP (LEDB).  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* Load Rounded, extended BFP to long BFP (LDXB).  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Load Rounded, extended BFP to short BFP (LEXB).  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Load Lengthened, long BFP to extended BFP (LXDB); the 128-bit result
   is returned as a doubleword pair.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Load Lengthened, short BFP to extended BFP (LXEB).  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2251
/* Load Logical 31-bit (LLGT*): keep only the low 31 bits.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

/* Memory loads of various widths; sign- or zero-extended into the
   64-bit output as the name indicates.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2299
#ifndef CONFIG_USER_ONLY
/* Load Control (LCTL): load 32-bit control registers R1..R3 from
   storage.  Privileged.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* Load Control, 64-bit (LCTLG).  Privileged.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2324 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2325 {
2326 check_privileged(s);
2327 potential_page_fault(s);
2328 gen_helper_lra(o->out, cpu_env, o->in2);
2329 set_cc_static(s);
2330 return NO_EXIT;
2331 }
2332
2333 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2334 {
2335 TCGv_i64 t1, t2;
2336
2337 check_privileged(s);
2338
2339 t1 = tcg_temp_new_i64();
2340 t2 = tcg_temp_new_i64();
2341 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2342 tcg_gen_addi_i64(o->in2, o->in2, 4);
2343 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2344 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2345 tcg_gen_shli_i64(t1, t1, 32);
2346 gen_helper_load_psw(cpu_env, t1, t2);
2347 tcg_temp_free_i64(t1);
2348 tcg_temp_free_i64(t2);
2349 return EXIT_NORETURN;
2350 }
2351 #endif
2352
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory at IN2
   via a helper (which may fault).  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2363
/* LOAD MULTIPLE (32-bit): load registers r1 through r3 (wrapping past
   r15 to r0) from consecutive words at the address in IN2, writing the
   low 32 bits of each GPR.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        /* Advance the address only between loads; the last load leaves
           IN2 at its own address.  */
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE HIGH: as op_lm32 but each word is written to the high
   32 bits of the target GPR.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);

    while (1) {
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    return NO_EXIT;
}

/* LOAD MULTIPLE (64-bit): load full 64-bit registers r1..r3 (with
   wraparound) from consecutive doublewords at IN2.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t8 = tcg_const_i64(8);

    while (1) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t8);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t8);
    return NO_EXIT;
}
2426
/* Move IN2 to OUT without copying: steal the temporary (and its
   global-ness flag) so the generic output writeback uses it directly.
   IN2 is marked unused so the cleanup code does not free it twice.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* As op_mov2, but moving a 128-bit pair: IN1/IN2 become OUT/OUT2.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
2447
/* MOVE (character): copy l1+1 bytes from IN2 to ADDR1 via helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: helper operates on the even/odd register pairs named by
   r1 and r2 and sets the condition code.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as op_mvcl with the extended register-pair form;
   IN2 carries the effective address operand.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2480
2481 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-address-space move; the length and
   key come from the register selected by the l1 field.  Sets CC.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: mirror of op_mvcp for the secondary space.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2501 #endif
2502
/* MOVE PAGE: helper copies one page; r0 supplies the option bits.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: helper copies up to the terminator byte held in r0.
   Updated first-operand address comes back in IN1; the updated second
   address is returned via the low 128 mechanism into IN2.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
2519
/* MULTIPLY (integer): plain 64-bit product.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY producing a 128-bit result: high half in OUT, low half
   retrieved into OUT2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP): 32-bit float multiply via softfloat helper.  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short -> long BFP): widening float multiply.  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP): 64-bit float multiply.  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (extended BFP): 128-bit multiply; both halves of the first
   operand (OUT/OUT2) and of the second (IN1/IN2) are passed; the low
   result half comes back via return_low128.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (long -> extended BFP): widening to 128 bits.  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2564
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3].  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (long BFP): addend taken directly from freg r3.  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (short BFP).  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (long BFP).  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
2594
/* LOAD NEGATIVE (integer): out = -|in2|.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    gen_helper_nabs_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD NEGATIVE (short BFP): force the sign bit of the 32-bit value
   (held in the high word convention used for float32 here: bit 31).  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (long BFP): force the sign bit of the 64-bit value.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (extended BFP): sign bit lives in the high half (IN1);
   the low half passes through unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2619
/* AND (character): storage-to-storage AND of l1+1 bytes; CC from helper.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (integer): two's complement negate.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (short BFP): flip the float32 sign bit.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (long BFP): flip the float64 sign bit.  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (extended BFP): flip the sign in the high half (IN1),
   copy the low half through.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
2654
/* OR (character): storage-to-storage OR of l1+1 bytes; CC from helper.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* OR (register/register-immediate forms).  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR IMMEDIATE into a sub-field of the register: insn->data encodes the
   field as (size << 8) | shift.  The immediate in IN2 is moved into
   position, ORed in, and the CC is computed from the touched bits only.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
2686
2687 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; flushes the translation lookaside buffer.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2694 #endif
2695
/* Byte-reversal (LOAD REVERSED family) at 16, 32 and 64-bit widths.  */

static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
2713
/* ROTATE LEFT SINGLE LOGICAL (32-bit): done in 32-bit TCG temps, then
   zero-extended back to the 64-bit output.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SET ACCESS: write the low 32 bits of IN2 into access register r1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
2741
/* BFP subtract and square-root generators, all thin helper wrappers.  */

/* SUBTRACT (short BFP).  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (long BFP).  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (extended BFP): 128-bit halves passed as OUT/OUT2, IN1/IN2;
   low result half returned via return_low128.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT (short BFP).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (long BFP).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (extended BFP).  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2779
2780 #ifndef CONFIG_USER_ONLY
/* SIGNAL PROCESSOR: privileged inter-CPU signalling; CC from helper.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2790 #endif
2791
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 or 63), which also selects the CC op.  The CC is computed from
   the pre-shift operands.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SET FPC: load the floating-point control register from IN2.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
2829
2830 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: bits of IN2 supply the new PSW access key,
   deposited into the key field of psw_mask.  Privileged.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET SYSTEM MASK: replace the top byte of the PSW mask with IN2.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CPU ADDRESS.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested.  */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* STORE CLOCK: TOD clock value from helper; CC forced to 0.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* SET CLOCK COMPARATOR.  Privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR.  Privileged.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit): store control regs r1..r3 to memory at IN2.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2889
/* STORE CONTROL (32-bit): store control regs r1..r3 to memory at IN2.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CPU ID: currently just reads cpu_num from the env.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* SET CPU TIMER.  Privileged.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CPU TIMER.  Privileged.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}

/* SET PREFIX.  Privileged.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE PREFIX: read the prefix register, masked to its valid bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}

/* STORE THEN AND/OR SYSTEM MASK (STNSM 0xac / STOSM): store the current
   system-mask byte, then AND or OR the immediate into it, depending on
   the opcode.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
2961 #endif
2962
/* Memory store generators: store IN1 (at the named width) to the
   address in IN2.  */

static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2986
/* STORE ACCESS MULTIPLE: store access registers r1..r3 to memory.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2997
/* STORE CHARACTERS UNDER MASK: store the bytes of IN1 selected by the
   4-bit mask m3 to successive bytes at IN2.  insn->data is the bit
   position of the least significant selectable byte (0 for STCM,
   32 for STCMH).  Contiguous masks are special-cased as a single
   wider store; arbitrary masks fall back to one store per set bit.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the lowest selected byte within IN1.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3046
/* STORE MULTIPLE: store registers r1..r3 (wrapping past r15) to
   consecutive memory; insn->data selects 4- or 8-byte elements.  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}

/* STORE MULTIPLE HIGH: store the high 32 bits of registers r1..r3
   (shifted down via a 64-bit left shift trick, storing the low word
   after tcg_gen_shl -- note the shift puts the high half where the
   32-bit store will pick it up).  */
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}
3094
/* SEARCH STRING: helper scans for the byte in r0; updated addresses
   come back in IN1 and (via low128) IN2; CC from helper.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* SUBTRACT (integer).  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: computed as in1 + ~in2 + borrow, where the
   incoming borrow is bit 1 of the current condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 cc;

    assert(!o->g_in2);
    tcg_gen_not_i64(o->in2, o->in2);
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* XXX possible optimization point.  */
    gen_op_calc_cc(s);
    cc = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(cc, cc_op);
    /* Extract the carry/no-borrow flag from CC bit 1.  */
    tcg_gen_shri_i64(cc, cc, 1);
    tcg_gen_add_i64(o->out, o->out, cc);
    tcg_temp_free_i64(cc);
    return NO_EXIT;
}
3127
/* SUPERVISOR CALL: record the SVC code and instruction length in the
   env, sync the PSW and CC, then raise the SVC exception.  Never
   returns to the translated stream.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
3146
/* TEST DATA CLASS (short/long/extended BFP): the helper classifies IN
   against the mask and yields the CC.  */

static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Extended form: the 128-bit value arrives as the OUT/OUT2 pair.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3167
3168 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: helper probes access rights for ADDR1 with key IN2
   and sets the CC accordingly.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3176 #endif
3177
/* TRANSLATE: replace l1+1 bytes at ADDR1 using the table at IN2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK: convert packed-decimal at IN2 to zoned format at ADDR1.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* EXCLUSIVE OR (character): storage-to-storage XOR; CC from helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
3206
/* EXCLUSIVE OR (register forms).  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* EXCLUSIVE OR IMMEDIATE into a register sub-field: insn->data encodes
   (size << 8) | shift, exactly as op_ori.  CC from the touched bits.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* Produce a constant zero output (single value).  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a constant zero output pair; OUT2 aliases OUT, so it is
   marked global-like to prevent a double free.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
3242
3243 /* ====================================================================== */
3244 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3245 the original inputs), update the various cc data structures in order to
3246 be able to compute the new condition code. */
3247
/* CC-output helpers: record the appropriate CC op together with the
   values it needs so the condition code can be computed lazily.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed addition: CC depends on both inputs and the result.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* Unsigned (logical) addition.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

/* Add-with-carry.  */
static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Signed compare.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* Unsigned (logical) compare.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}
3307
/* BFP result classes: CC from the sign/zero/NaN class of the result.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* Load-negative results.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* Load-complement results.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Nonzero test on the low 32 bits (zero-extended first so the shared
   CC_OP_NZ op sees only the relevant bits).  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

/* Nonzero test on the full 64 bits.  */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}
3353
/* Sign test of the result against zero.  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Signed subtraction.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* Unsigned (logical) subtraction.  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* Subtract-with-borrow.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* TEST UNDER MASK.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
3403
3404 /* ====================================================================== */
3405 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3406 with the TCG register to which we will write. Used in combination with
3407 the "wout" generators, in some cases we need a new temporary, and in
3408 some cases we can write to a TCG global. */
3409
/* Allocate a fresh temporary for the output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

/* Allocate a fresh temporary pair (128-bit results).  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}

/* Write directly into GPR r1 (a TCG global -- flagged so it is not
   freed by the cleanup code).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}

/* Write into the even/odd GPR pair r1/r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}

/* Write directly into FPR r1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}

/* Write into the extended-FP register pair r1/r1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3450
3451 /* ====================================================================== */
3452 /* The "Write OUTput" generators. These generally perform some non-trivial
3453 copy of data to TCG globals, or to main memory. The trivial cases are
3454 generally handled by having a "prep" generator install the TCG global
3455 as the destination of the operation. */
3456
/* Store the 64-bit result to general register R1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

/* Insert the low 8 bits of the result into R1, preserving the rest.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

/* Insert the low 16 bits of the result into R1, preserving the rest.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}

/* Store the low 32 bits of the result to R1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}

/* Store the two 32-bit results to the even/odd pair R1/R1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

/* Split the 64-bit result across the pair: low half to R1+1, high half
   to R1.  Note this clobbers o->out with the shifted value, which is
   safe because the wout step runs after the operation proper.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

/* Store a short (32-bit) float result to float register R1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}

/* Store a long (64-bit) float result to float register R1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
3505
3506 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3507 {
3508 /* ??? Specification exception: r1 must be < 14. */
3509 int f1 = get_field(s->fields, r1);
3510 store_freg(f1, o->out);
3511 store_freg((f1 + 2) & 15, o->out2);
3512 }
3513
3514 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3515 {
3516 if (get_field(f, r1) != get_field(f, r2)) {
3517 store_reg32_i64(get_field(f, r1), o->out);
3518 }
3519 }
3520
/* Conditionally store the 32-bit float result to F1: skipped when the
   R1 and R2 fields name the same register.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
3527
/* The wout_m1_* helpers store the result to memory at the first-operand
   address previously computed into o->addr1 (by in1_la1/in1_la2).  */

/* Store the low 8 bits of the result to [addr1].  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

/* Store the low 16 bits of the result to [addr1].  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

/* Store the low 32 bits of the result to [addr1].  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

/* Store the full 64-bit result to [addr1].  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}

/* Store the low 32 bits of the result to the address held in o->in2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3552
3553 /* ====================================================================== */
3554 /* The "INput 1" generators. These load the first operand to an insn. */
3555
/* Load general register R1 into a fresh temporary.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

/* Use the R1 register global directly ("_o" variants make no copy);
   g_in1 prevents translate_one from freeing it.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

/* Low 32 bits of R1, sign-extended to 64.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

/* Low 32 bits of R1, zero-extended to 64.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

/* High 32 bits of R1, shifted down into the low half.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}

/* Load the odd register of the R1 even/odd pair.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* Low 32 bits of the odd register of the R1 pair, sign-extended.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* Low 32 bits of the odd register of the R1 pair, zero-extended.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}
3607
3608 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3609 {
3610 /* ??? Specification exception: r1 must be even. */
3611 int r1 = get_field(f, r1);
3612 o->in1 = tcg_temp_new_i64();
3613 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3614 }
3615
/* Load general register R2 into a fresh temporary.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* Load general register R3 into a fresh temporary.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

/* Use the R3 register global directly; g_in1 prevents freeing.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

/* Low 32 bits of R3, sign-extended to 64.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

/* Low 32 bits of R3, zero-extended to 64.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

/* Load the short (32-bit) float in register R1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

/* Use the F1 float register global directly; g_in1 prevents freeing.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
3654
/* Install the float register pair R1/R1+2 as out/out2.
   NOTE(review): despite the in1_ name this fills the *output* slots,
   not o->in1 — presumably the ops using it read and write the x1 pair
   in place.  Confirm against its users in insn-data.def before
   changing.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}
3663
/* Compute the first-operand effective address from B1/D1 into addr1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

/* Compute an effective address from the *second*-operand fields
   (X2/B2/D2, X2 optional) but deliberately store it as addr1.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
3674
/* The in1_m1_* helpers compute the first-operand address into addr1
   (via in1_la1) and then load in1 from it at the given width.  */

/* 8-bit unsigned load from [addr1].  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

/* 16-bit sign-extended load from [addr1].  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

/* 16-bit zero-extended load from [addr1].  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

/* 32-bit sign-extended load from [addr1].  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

/* 32-bit zero-extended load from [addr1].  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

/* 64-bit load from [addr1].  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
3716
3717 /* ====================================================================== */
3718 /* The "INput 2" generators. These load the second operand to an insn. */
3719
/* Use the R1 register global directly as in2; g_in2 prevents freeing.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}

/* Low 16 bits of R1, zero-extended to 64.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}

/* Low 32 bits of R1, zero-extended to 64.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}

/* Load general register R2 into a fresh temporary.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}

/* Use the R2 register global directly as in2; g_in2 prevents freeing.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
3748
3749 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
3750 {
3751 int r2 = get_field(f, r2);
3752 if (r2 != 0) {
3753 o->in2 = load_reg(r2);
3754 }
3755 }
3756
/* Low 8 bits of R2, sign-extended to 64.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Low 8 bits of R2, zero-extended to 64.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Low 16 bits of R2, sign-extended to 64.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Low 16 bits of R2, zero-extended to 64.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Load general register R3 into a fresh temporary.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

/* Low 32 bits of R2, sign-extended to 64.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Low 32 bits of R2, zero-extended to 64.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Load the short (32-bit) float in register R2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
3802
/* Use the F2 float register global directly as in2; not freed.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

/* Use the float register pair R2/R2+2 as in1/in2 (both globals).  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r2 must be < 14. */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[(r2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}
3817
/* Effective address formed from register R2 alone (no index or
   displacement).  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}

/* Second-operand effective address from X2/B2/D2 (X2 optional).  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

/* PC-relative address: current PC plus I2 halfwords.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

/* Shift-amount operand; 31 (resp. 63) is the limit passed to
   help_l2_shift (defined elsewhere in this file).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
3843
/* The in2_m2_* helpers compute the second-operand address (in2_a2) and
   then replace o->in2 with the value loaded from it — the address
   temporary is reused as the load destination.  The in2_mri2_* variants
   do the same with the PC-relative address from in2_ri2.  */

/* 8-bit unsigned load from the second-operand address.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

/* 16-bit sign-extended load.  */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

/* 16-bit zero-extended load.  */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* 32-bit sign-extended load.  */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* 32-bit zero-extended load.  */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* 64-bit load.  */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

/* 16-bit zero-extended load from the PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

/* 32-bit sign-extended load from the PC-relative address.  */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

/* 32-bit zero-extended load from the PC-relative address.  */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

/* 64-bit load from the PC-relative address.  */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
3903
/* The raw (sign-extended) immediate I2.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

/* I2 truncated to 8 bits, zero-extended.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

/* I2 truncated to 16 bits, zero-extended.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

/* I2 truncated to 32 bits, zero-extended.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

/* I2 as 16-bit unsigned, shifted left by the per-insn data amount.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

/* I2 as 32-bit unsigned, shifted left by the per-insn data amount.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
3935
3936 /* ====================================================================== */
3937
3938 /* Find opc within the table of insns. This is formulated as a switch
3939 statement so that (1) we get compile-time notice of cut-paste errors
3940 for duplicated opcodes, and (2) the compiler generates the binary
3941 search tree, rather than us having to post-process the table. */
3942
/* C() defines an insn with no extra per-insn data; forward to D() with
   data = 0.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: one enumerator per insn.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: one DisasInsn initializer per insn.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: one switch case per insn, mapping the combined
   16-bit opcode to its insn_info entry.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Return the insn descriptor for OPC, or NULL if unknown.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
3994
3995 /* Extract a field from the insn. The INSN should be left-aligned in
3996 the uint64_t so that we can more easily utilize the big-bit-endian
3997 definitions we extract from the Principals of Operation. */
3998
3999 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4000 {
4001 uint32_t r, m;
4002
4003 if (f->size == 0) {
4004 return;
4005 }
4006
4007 /* Zero extract the field from the insn. */
4008 r = (insn << f->beg) >> (64 - f->size);
4009
4010 /* Sign-extend, or un-swap the field as necessary. */
4011 switch (f->type) {
4012 case 0: /* unsigned */
4013 break;
4014 case 1: /* signed */
4015 assert(f->size <= 32);
4016 m = 1u << (f->size - 1);
4017 r = (r ^ m) - m;
4018 break;
4019 case 2: /* dl+dh split, signed 20 bit. */
4020 r = ((int8_t)r << 12) | (r >> 8);
4021 break;
4022 default:
4023 abort();
4024 }
4025
4026 /* Validate that the "compressed" encoding we selected above is valid.
4027 I.e. we havn't make two different original fields overlap. */
4028 assert(((o->presentC >> f->indexC) & 1) == 0);
4029 o->presentC |= 1 << f->indexC;
4030 o->presentO |= 1 << f->indexO;
4031
4032 o->c[f->indexC] = r;
4033 }
4034
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn. */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first halfword's major opcode determines the insn length.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Re-read the insn at its full length and left-align it in the
       64-bit word (see extract_field).  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in bits 12-15.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode.  */
        op2 = 0;
        break;
    default:
        /* Secondary opcode at bit 40 (byte 5).  */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
4123
/* Translate the single insn at s->pc, emitting TCG ops via the table
   helpers (in1/in2/prep/op/wout/cout).  Advances s->pc and returns how
   the translation block should continue.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter.  This includes ILLOPC. */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        /* Map the legacy DISAS_* status onto ExitStatus.  */
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; it runs only
       if the insn's table entry provides the corresponding helper.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       slots holding TCG globals, which must not be freed. */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
4208
/* Translate a block of insns starting at TB->pc until a branch, page
   boundary, or resource limit ends the TB.  When SEARCH_PC is set,
   record per-op PC/cc_op bookkeeping for restore_state_to_opc.  */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
        if (search_pc) {
            /* Record pc/cc_op for the first TCG op of this insn; pad
               any skipped op slots with instr_start = 0.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* Stop before translating an insn with a breakpoint on it.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue as required by how the loop ended.  */
    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Pad the remaining op slots of the final insn.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
4347
/* Translate a TB without PC-search bookkeeping.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

/* Translate a TB recording per-op PC bookkeeping for
   restore_state_to_opc.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4357
/* Restore psw.addr (and cc_op, when it carries real state) from the
   bookkeeping recorded at op index PC_POS by gen_intermediate_code_pc.  */
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
    cc_op = gen_opc_cc_op[pc_pos];
    /* DYNAMIC/STATIC are placeholders, not values worth restoring.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}