/* target-s390x/translate.c (qemu.git, at "target-s390: Convert CONVERT TO DECIMAL") */
/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"

/* global register indexes */
static TCGv_ptr cpu_env;

#include "exec/gen-icount.h"
#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"


/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t pc, next_pc;
    enum cc_op cc_op;
    bool singlestep_enabled;
    int is_jmp;
};

/* Information carried about a condition to be evaluated. */
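/* The g1/g2 flags mark comparison operands that alias global TCG temps
   such as cc_op, cc_src or cc_dst; free_compare() skips freeing those. */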
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#define DISAS_EXCP 4

static void gen_op_calc_cc(DisasContext *s);

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}

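/* Compute the value saved by branch-and-link style instructions: in
   31-bit addressing mode the link value carries the addressing-mode
   flag in bit 31 alongside the return address. */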
static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
{
    if (!(s->tb->flags & FLAG_MASK_64)) {
        if (s->tb->flags & FLAG_MASK_32) {
            return pc | 0x80000000;
        }
    }
    return pc;
}

void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;

    if (env->cc_op > 3) {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");

    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
    }

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}

static inline TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static inline TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, fregs[reg]);
    return r;
}

static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
#else
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
#endif
    return r;
}

static inline TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

static inline TCGv_i32 load_reg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(r, regs[reg]);
    return r;
}

static inline TCGv_i64 load_reg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(r, regs[reg]);
    return r;
}

static inline void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static inline void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
#endif
}

static inline void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static inline void store_reg16(int reg, TCGv_i32 v)
{
    /* 16 bit register writes keep the upper bytes */
#if HOST_LONG_BITS == 32
    tcg_gen_deposit_i32(TCGV_LOW(regs[reg]), TCGV_LOW(regs[reg]), v, 0, 16);
#else
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 16);
#endif
}

static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
#else
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
#endif
}

static inline void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

static inline void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static inline void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

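/* Before emitting a memory access that may fault, commit the guest state
   (psw.addr and the lazily-computed condition code) so the exception
   handler observes a consistent CPU state. */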
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
    update_psw_addr(s);
    gen_op_calc_cc(s);
#endif
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
{
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
}

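/* Map the PSW address-space-control bits to the TCG MMU index used for
   memory accesses: primary, secondary or home space. */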
static inline int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        tcg_abort();
        break;
    }
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction. */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc. */
    gen_op_calc_cc(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);

    /* End TB here. */
    s->is_jmp = DISAS_EXCP;
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_SPECIFICATION);
}

static inline void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}

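/* Compute an effective address from index register X2, base register B2
   and displacement D2 (any of which may be absent), truncated to 31 bits
   outside 64-bit addressing mode. The caller owns the returned temp. */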
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp;

    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
        d2 &= 0x7fffffffUL;
    }

    if (x2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
        } else {
            tmp = load_reg(x2);
        }
        if (b2) {
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        }
    } else if (b2) {
        if (d2) {
            tmp = tcg_const_i64(d2);
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
        } else {
            tmp = load_reg(b2);
        }
    } else {
        tmp = tcg_const_i64(d2);
    }

    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
    }

    return tmp;
}

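/* The helpers below implement lazy condition-code evaluation: rather than
   computing the cc immediately, they stash the operands in cc_src/cc_dst/
   cc_vr and record the pending operation in s->cc_op, so gen_op_calc_cc()
   materializes the value only when it is actually consumed. */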
static void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
                                  TCGv_i32 dst)
{
    tcg_gen_extu_i32_i64(cc_src, src);
    tcg_gen_extu_i32_i64(cc_dst, dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
}

static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i32(s, cond, v1, v2);
}

static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
                          enum cc_op cond)
{
    gen_op_update2_cc_i64(s, cond, v1, v2);
}

static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
}

static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
{
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
}

static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
{
    /* XXX optimize for the constant? put it in s? */
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
    tcg_temp_free_i32(tmp);
}

static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
{
    TCGv_i32 tmp = tcg_const_i32(v2);
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
    tcg_temp_free_i32(tmp);
}

static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
}

static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
{
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
}

static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_s64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
{
    TCGv_i64 tmp = tcg_const_i64(v2);
    cmp_u64(s, v1, tmp);
    tcg_temp_free_i64(tmp);
}

static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
{
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
}

static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
}

static void set_cc_cmp_f32_i64(DisasContext *s, TCGv_i32 v1, TCGv_i64 v2)
{
    tcg_gen_extu_i32_i64(cc_src, v1);
    tcg_gen_mov_i64(cc_dst, v2);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_LTGT_F32;
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i32 v1)
{
    gen_op_update1_cc_i32(s, CC_OP_NZ_F32, v1);
}

/* CC value is in env->cc_op */
static inline void set_cc_static(DisasContext *s)
{
    tcg_gen_discard_i64(cc_src);
    tcg_gen_discard_i64(cc_dst);
    tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;
}

static inline void gen_op_set_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline void gen_update_cc_op(DisasContext *s)
{
    gen_op_set_cc_op(s);
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_LTGT_F32:
    case CC_OP_LTGT_F64:
    case CC_OP_SLAG:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
{
    debug_insn(insn);

    *r1 = (insn >> 4) & 0xf;
    *r2 = insn & 0xf;
}

static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
                                 int *x2, int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    *x2 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;

    return get_address(s, *x2, *b2, *d2);
}

static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
                             int *b2, int *d2)
{
    debug_insn(insn);

    *r1 = (insn >> 20) & 0xf;
    /* aka m3 */
    *r3 = (insn >> 16) & 0xf;
    *b2 = (insn >> 12) & 0xf;
    *d2 = insn & 0xfff;
}

static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
                                 int *b1, int *d1)
{
    debug_insn(insn);

    *i2 = (insn >> 16) & 0xff;
    *b1 = (insn >> 12) & 0xf;
    *d1 = insn & 0xfff;

    return get_address(s, 0, *b1, *d1);
}

static int use_goto_tb(DisasContext *s, uint64_t dest)
{
    /* NOTE: we handle the case where the TB spans two pages here */
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
            && !s->singlestep_enabled
            && !(s->tb->cflags & CF_LAST_IO));
}

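/* Emit either a direct (chainable) jump to the target TB when
   use_goto_tb() allows it, or an indirect exit through psw_addr. */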
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
{
    gen_update_cc_op(s);

    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
        tcg_gen_exit_tb(0);
    }
}

static inline void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static inline void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

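/* A four-bit branch mask selects which cc values satisfy the branch:
   bit value 8 matches cc==0, 4 matches cc==1, 2 matches cc==2 and
   1 matches cc==3, i.e. the test is "(8 >> cc) & mask". */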
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC. We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

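/* MVC: for the cheap cases, emit the loads and stores inline (the L
   field encodes the operand length minus one) and detect the
   dest == src + 1 overlap that the architecture defines as byte
   propagation; everything else goes through the out-of-line helper. */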
static void gen_op_mvc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 tmp, tmp2;
    int i;
    int l_memset = gen_new_label();
    int l_out = gen_new_label();
    TCGv_i64 dest = tcg_temp_local_new_i64();
    TCGv_i64 src = tcg_temp_local_new_i64();
    TCGv_i32 vl;

    /* Find out if we should use the inline version of mvc */
    switch (l) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 11:
    case 15:
        /* use inline */
        break;
    default:
        /* Fall back to helper */
        vl = tcg_const_i32(l);
        potential_page_fault(s);
        gen_helper_mvc(cpu_env, vl, s1, s2);
        tcg_temp_free_i32(vl);
        return;
    }

    tcg_gen_mov_i64(dest, s1);
    tcg_gen_mov_i64(src, s2);

    if (!(s->tb->flags & FLAG_MASK_64)) {
        /* XXX what if we overflow while moving? */
        tcg_gen_andi_i64(dest, dest, 0x7fffffffUL);
        tcg_gen_andi_i64(src, src, 0x7fffffffUL);
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_addi_i64(tmp, src, 1);
    tcg_gen_brcond_i64(TCG_COND_EQ, dest, tmp, l_memset);
    tcg_temp_free_i64(tmp);

    switch (l) {
    case 0:
        tmp = tcg_temp_new_i64();

        tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
        tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));

        tcg_temp_free_i64(tmp);
        break;
    case 1:
        tmp = tcg_temp_new_i64();

        tcg_gen_qemu_ld16u(tmp, src, get_mem_index(s));
        tcg_gen_qemu_st16(tmp, dest, get_mem_index(s));

        tcg_temp_free_i64(tmp);
        break;
    case 3:
        tmp = tcg_temp_new_i64();

        tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s));
        tcg_gen_qemu_st32(tmp, dest, get_mem_index(s));

        tcg_temp_free_i64(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s));
        tcg_gen_addi_i64(src, src, 4);
        tcg_gen_qemu_ld8u(tmp2, src, get_mem_index(s));
        tcg_gen_qemu_st32(tmp, dest, get_mem_index(s));
        tcg_gen_addi_i64(dest, dest, 4);
        tcg_gen_qemu_st8(tmp2, dest, get_mem_index(s));

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 7:
        tmp = tcg_temp_new_i64();

        tcg_gen_qemu_ld64(tmp, src, get_mem_index(s));
        tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));

        tcg_temp_free_i64(tmp);
        break;
    default:
        /* The inline version can grow too large for awkward lengths;
           only use it for known-good lengths. */
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_const_i64(8);
        for (i = 0; (i + 7) <= l; i += 8) {
            tcg_gen_qemu_ld64(tmp, src, get_mem_index(s));
            tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));

            tcg_gen_add_i64(src, src, tmp2);
            tcg_gen_add_i64(dest, dest, tmp2);
        }

        tcg_temp_free_i64(tmp2);
        tmp2 = tcg_const_i64(1);

        for (; i <= l; i++) {
            tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
            tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));

            tcg_gen_add_i64(src, src, tmp2);
            tcg_gen_add_i64(dest, dest, tmp2);
        }

        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    }

    tcg_gen_br(l_out);

    gen_set_label(l_memset);
    /* memset case (dest == (src + 1)) */
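    /* An overlapping MVC with dest == src + 1 propagates the first
       source byte through the entire destination, so replicate that
       byte into a 64-bit pattern and store it in large chunks. */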

    tmp = tcg_temp_new_i64();
    tmp2 = tcg_temp_new_i64();
    /* fill tmp with the byte */
    tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
    tcg_gen_shli_i64(tmp2, tmp, 8);
    tcg_gen_or_i64(tmp, tmp, tmp2);
    tcg_gen_shli_i64(tmp2, tmp, 16);
    tcg_gen_or_i64(tmp, tmp, tmp2);
    tcg_gen_shli_i64(tmp2, tmp, 32);
    tcg_gen_or_i64(tmp, tmp, tmp2);
    tcg_temp_free_i64(tmp2);

    tmp2 = tcg_const_i64(8);

    for (i = 0; (i + 7) <= l; i += 8) {
        tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));
        tcg_gen_addi_i64(dest, dest, 8);
    }

    tcg_temp_free_i64(tmp2);
    tmp2 = tcg_const_i64(1);

    for (; i <= l; i++) {
        tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));
        tcg_gen_addi_i64(dest, dest, 1);
    }

    tcg_temp_free_i64(tmp2);
    tcg_temp_free_i64(tmp);

    gen_set_label(l_out);

    tcg_temp_free(dest);
    tcg_temp_free(src);
}

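/* CLC: lengths that map to a single 1, 2, 4 or 8 byte access (L is the
   operand length minus one) are compared inline; all other lengths go
   through the out-of-line helper, which also computes the cc. */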
static void gen_op_clc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 tmp;
    TCGv_i64 tmp2;
    TCGv_i32 vl;

    /* check for simple 32bit or 64bit match */
    switch (l) {
    case 0:
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld8u(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld8u(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    case 1:
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld16u(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld16u(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    case 3:
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld32u(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld32u(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    case 7:
        tmp = tcg_temp_new_i64();
        tmp2 = tcg_temp_new_i64();

        tcg_gen_qemu_ld64(tmp, s1, get_mem_index(s));
        tcg_gen_qemu_ld64(tmp2, s2, get_mem_index(s));
        cmp_u64(s, tmp, tmp2);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        return;
    }

    potential_page_fault(s);
    vl = tcg_const_i32(l);
    gen_helper_clc(cc_op, cpu_env, vl, s1, s2);
    tcg_temp_free_i32(vl);
    set_cc_static(s);
}

static void disas_e3(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int x2, int b2, int d2)
{
    TCGv_i64 addr, tmp2;
    TCGv_i32 tmp32_1;

    LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
              op, r1, x2, b2, d2);
    addr = get_address(s, x2, b2, d2);
    switch (op) {
    case 0xf: /* LRVG R1,D2(X2,B2) [RXY] */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
        tcg_gen_bswap64_i64(tmp2, tmp2);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
        tmp2 = tcg_temp_new_i64();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_andi_i64(tmp2, tmp2, 0x7fffffffULL);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_temp_free_i64(tmp2);
        tcg_gen_bswap16_i32(tmp32_1, tmp32_1);
        store_reg16(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
        tmp32_1 = load_reg32(r1);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        break;
    default:
        LOG_DISAS("illegal e3 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
    tcg_temp_free_i64(addr);
}

#ifndef CONFIG_USER_ONLY
static void disas_e5(CPUS390XState *env, DisasContext *s, uint64_t insn)
{
    TCGv_i64 tmp, tmp2;
    int op = (insn >> 32) & 0xff;

    tmp = get_address(s, 0, (insn >> 28) & 0xf, (insn >> 16) & 0xfff);
    tmp2 = get_address(s, 0, (insn >> 12) & 0xf, insn & 0xfff);

    LOG_DISAS("disas_e5: insn %" PRIx64 "\n", insn);
    switch (op) {
    case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
        /* Test Protection */
        potential_page_fault(s);
        gen_helper_tprot(cc_op, tmp, tmp2);
        set_cc_static(s);
        break;
    default:
        LOG_DISAS("illegal e5 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(tmp2);
}
#endif

static void disas_eb(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int r3, int b2, int d2)
{
    TCGv_i64 tmp, tmp2, tmp3, tmp4;
    TCGv_i32 tmp32_1, tmp32_2;
    int i, stm_len;

    LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
              op, r1, r3, b2, d2);
    switch (op) {
    case 0xc: /* SRLG R1,R3,D2(B2) [RSY] */
    case 0xd: /* SLLG R1,R3,D2(B2) [RSY] */
    case 0xa: /* SRAG R1,R3,D2(B2) [RSY] */
    case 0xb: /* SLAG R1,R3,D2(B2) [RSY] */
    case 0x1c: /* RLLG R1,R3,D2(B2) [RSY] */
        if (b2) {
            tmp = get_address(s, 0, b2, d2);
            tcg_gen_andi_i64(tmp, tmp, 0x3f);
        } else {
            tmp = tcg_const_i64(d2 & 0x3f);
        }
        switch (op) {
        case 0xc:
            tcg_gen_shr_i64(regs[r1], regs[r3], tmp);
            break;
        case 0xd:
            tcg_gen_shl_i64(regs[r1], regs[r3], tmp);
            break;
        case 0xa:
            tcg_gen_sar_i64(regs[r1], regs[r3], tmp);
            break;
        case 0xb:
            tmp2 = tcg_temp_new_i64();
            tmp3 = tcg_temp_new_i64();
            gen_op_update2_cc_i64(s, CC_OP_SLAG, regs[r3], tmp);
            tcg_gen_shl_i64(tmp2, regs[r3], tmp);
            /* override sign bit with source sign */
            tcg_gen_andi_i64(tmp2, tmp2, ~0x8000000000000000ULL);
            tcg_gen_andi_i64(tmp3, regs[r3], 0x8000000000000000ULL);
            tcg_gen_or_i64(regs[r1], tmp2, tmp3);
            tcg_temp_free_i64(tmp2);
            tcg_temp_free_i64(tmp3);
            break;
        case 0x1c:
            tcg_gen_rotl_i64(regs[r1], regs[r3], tmp);
            break;
        default:
            tcg_abort();
            break;
        }
        if (op == 0xa) {
            set_cc_s64(s, regs[r1]);
        }
        tcg_temp_free_i64(tmp);
        break;
    case 0x1d: /* RLL R1,R3,D2(B2) [RSY] */
        if (b2) {
            tmp = get_address(s, 0, b2, d2);
            tcg_gen_andi_i64(tmp, tmp, 0x3f);
        } else {
            tmp = tcg_const_i64(d2 & 0x3f);
        }
        tmp32_1 = tcg_temp_new_i32();
        tmp32_2 = load_reg32(r3);
        tcg_gen_trunc_i64_i32(tmp32_1, tmp);
        switch (op) {
        case 0x1d:
            tcg_gen_rotl_i32(tmp32_1, tmp32_2, tmp32_1);
            break;
        default:
            tcg_abort();
            break;
        }
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x4: /* LMG R1,R3,D2(B2) [RSE] */
    case 0x24: /* STMG R1,R3,D2(B2) [RSE] */
        stm_len = 8;
        goto do_mh;
    case 0x26: /* STMH R1,R3,D2(B2) [RSE] */
    case 0x96: /* LMH R1,R3,D2(B2) [RSE] */
        stm_len = 4;
    do_mh:
        /* Apparently, unrolling lmg/stmg of any size gains performance -
           even for very long ones... */
        tmp = get_address(s, 0, b2, d2);
        tmp3 = tcg_const_i64(stm_len);
        tmp4 = tcg_const_i64(32);
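        /* LMH/STMH operate on the high 32 bits of the 64-bit registers;
           tmp4 holds the shift count used to move data between the low
           and high halves in the 0x96 and 0x26 cases below. */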
        for (i = r1;; i = (i + 1) % 16) {
            switch (op) {
            case 0x4:
                tcg_gen_qemu_ld64(regs[i], tmp, get_mem_index(s));
                break;
            case 0x96:
                tmp2 = tcg_temp_new_i64();
#if HOST_LONG_BITS == 32
                tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
                tcg_gen_trunc_i64_i32(TCGV_HIGH(regs[i]), tmp2);
#else
                tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
                tcg_gen_shl_i64(tmp2, tmp2, tmp4);
                tcg_gen_ext32u_i64(regs[i], regs[i]);
                tcg_gen_or_i64(regs[i], regs[i], tmp2);
#endif
                tcg_temp_free_i64(tmp2);
                break;
            case 0x24:
                tcg_gen_qemu_st64(regs[i], tmp, get_mem_index(s));
                break;
            case 0x26:
                tmp2 = tcg_temp_new_i64();
                tcg_gen_shr_i64(tmp2, regs[i], tmp4);
                tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
                tcg_temp_free_i64(tmp2);
                break;
            default:
                tcg_abort();
            }
            if (i == r3) {
                break;
            }
            tcg_gen_add_i64(tmp, tmp, tmp3);
        }
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp3);
        tcg_temp_free_i64(tmp4);
        break;
    case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stcmh(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
#ifndef CONFIG_USER_ONLY
    case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
        /* Load Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_lctlg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
        /* Store Control */
        check_privileged(s);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        gen_helper_stctg(cpu_env, tmp32_1, tmp, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
#endif
    case 0x30: /* CSG R1,R3,D2(B2) [RSY] */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        /* XXX rewrite in tcg */
        gen_helper_csg(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r3);
        potential_page_fault(s);
        /* XXX rewrite in tcg */
        gen_helper_cdsg(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x52: /* MVIY D1(B1),I2 [SIY] */
        tmp = get_address(s, 0, b2, d2); /* SIY -> this is the destination */
        tmp2 = tcg_const_i64((r1 << 4) | r3);
        tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    default:
        LOG_DISAS("illegal eb operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}

static void disas_ed(CPUS390XState *env, DisasContext *s, int op, int r1,
                     int x2, int b2, int d2, int r1b)
{
    TCGv_i32 tmp_r1, tmp32;
    TCGv_i64 addr, tmp;
    addr = get_address(s, x2, b2, d2);
    tmp_r1 = tcg_const_i32(r1);
    switch (op) {
    case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_ldeb(cpu_env, tmp_r1, addr);
        break;
    case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_lxdb(cpu_env, tmp_r1, addr);
        break;
    case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = load_freg32(r1);
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        set_cc_cmp_f32_i64(s, tmp32, tmp);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
        break;
    case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_aeb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);

        tmp32 = load_freg32(r1);
        gen_set_cc_nz_f32(s, tmp32);
        tcg_temp_free_i32(tmp32);
        break;
    case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_seb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);

        tmp32 = load_freg32(r1);
        gen_set_cc_nz_f32(s, tmp32);
        tcg_temp_free_i32(tmp32);
        break;
    case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_deb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
        break;
    case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tceb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tcdb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_tcxb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
        tmp = tcg_temp_new_i64();
        tmp32 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32, tmp);
        gen_helper_meeb(cpu_env, tmp_r1, tmp32);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32);
        break;
    case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_cdb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_adb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_sdb(cc_op, cpu_env, tmp_r1, addr);
        set_cc_static(s);
        break;
    case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_mdb(cpu_env, tmp_r1, addr);
        break;
    case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
        potential_page_fault(s);
        gen_helper_ddb(cpu_env, tmp_r1, addr);
        break;
    case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
        /* for RXF insns, r1 is R3 and r1b is R1 */
        tmp32 = tcg_const_i32(r1b);
        potential_page_fault(s);
        gen_helper_madb(cpu_env, tmp32, addr, tmp_r1);
        tcg_temp_free_i32(tmp32);
        break;
    default:
        LOG_DISAS("illegal ed operation 0x%x\n", op);
        gen_illegal_opcode(s);
        return;
    }
    tcg_temp_free_i32(tmp_r1);
    tcg_temp_free_i64(addr);
}

static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
                     uint32_t insn)
{
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    int r1, r2;
#ifndef CONFIG_USER_ONLY
    int r3, d2, b2;
#endif

    r1 = (insn >> 4) & 0xf;
    r2 = insn & 0xf;

    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);

    switch (op) {
    case 0x22: /* IPM R1 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        gen_op_calc_cc(s);
        gen_helper_ipm(cpu_env, cc_op, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x41: /* CKSM R1,R2 [RRE] */
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        gen_op_movi_cc(s, 0);
        break;
    case 0x4e: /* SAR R1,R2 [RRE] */
        tmp32_1 = load_reg32(r2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x4f: /* EAR R1,R2 [RRE] */
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
        store_reg32(r1, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x54: /* MVPG R1,R2 [RRE] */
        tmp = load_reg(0);
        tmp2 = load_reg(r1);
        tmp3 = load_reg(r2);
        potential_page_fault(s);
        gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        /* XXX check CCO bit and set CC accordingly */
        gen_op_movi_cc(s, 0);
        break;
    case 0x55: /* MVST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        gen_op_movi_cc(s, 1);
        break;
    case 0x5d: /* CLST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;
    case 0x5e: /* SRST R1,R2 [RRE] */
        tmp32_1 = load_reg32(0);
        tmp32_2 = tcg_const_i32(r1);
        tmp32_3 = tcg_const_i32(r2);
        potential_page_fault(s);
        gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        tcg_temp_free_i32(tmp32_3);
        break;

#ifndef CONFIG_USER_ONLY
    case 0x02: /* STIDP D2(B2) [S] */
        /* Store CPU ID */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stidp(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x04: /* SCK D2(B2) [S] */
        /* Set Clock */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sck(cc_op, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x05: /* STCK D2(B2) [S] */
        /* Store Clock */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stck(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x06: /* SCKC D2(B2) [S] */
        /* Set Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x07: /* STCKC D2(B2) [S] */
        /* Store Clock Comparator */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stckc(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x08: /* SPT D2(B2) [S] */
        /* Set CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x09: /* STPT D2(B2) [S] */
        /* Store CPU Timer */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stpt(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0a: /* SPKA D2(B2) [S] */
        /* Set PSW Key from Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
        tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
        tcg_gen_or_i64(psw_mask, tmp2, tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0x0d: /* PTLB [S] */
        /* Purge TLB */
        check_privileged(s);
        gen_helper_ptlb(cpu_env);
        break;
    case 0x10: /* SPX D2(B2) [S] */
        /* Set Prefix Register */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_spx(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 0x11: /* STPX D2(B2) [S] */
        /* Store Prefix */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x12: /* STAP D2(B2) [S] */
        /* Store CPU Address */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0x21: /* IPTE R1,R2 [RRE] */
        /* Invalidate PTE */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r1);
        tmp2 = load_reg(r2);
        gen_helper_ipte(cpu_env, tmp, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x29: /* ISKE R1,R2 [RRE] */
        /* Insert Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp = load_reg(r2);
        tmp2 = tcg_temp_new_i64();
        gen_helper_iske(tmp2, cpu_env, tmp);
        store_reg(r1, tmp2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        break;
    case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Reset Reference Bit Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x2b: /* SSKE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        gen_helper_sske(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x34: /* STSCH D2(B2) [S] */
        /* Store Subchannel */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = load_reg32(r1);
        tmp = load_reg(r2);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        r2 = insn & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call */
        check_privileged(s);
        gen_op_movi_cc(s, 3);
        break;
    case 0x78: /* STCKE D2(B2) [S] */
        /* Store Clock Extended */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        break;
    case 0x79: /* SACF D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->pc = s->next_pc;
        update_psw_addr(s);
        s->is_jmp = DISAS_JUMP;
        break;
    case 0x7d: /* STSI D2(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        set_cc_static(s);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
        break;
    case 0x9d: /* LFPC D2(B2) [S] */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp32_1 = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i32(tmp32_1);
        break;
    case 0xb1: /* STFL D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
        break;
    case 0xb2: /* LPSWE D2(B2) [S] */
        /* Load PSW Extended */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
        break;
    case 0x20: /* SERVC R1,R2 [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        tmp = load_reg(r1);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        set_cc_static(s);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
        break;
#endif
    default:
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
        break;
    }
}

static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
                     int r1, int r2)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
    LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
#define FP_HELPER(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

#define FP_HELPER_CC(i) \
    tmp32_1 = tcg_const_i32(r1); \
    tmp32_2 = tcg_const_i32(r2); \
    gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
    set_cc_static(s); \
    tcg_temp_free_i32(tmp32_1); \
    tcg_temp_free_i32(tmp32_2);

    switch (op) {
    case 0x0: /* LPEBR R1,R2 [RRE] */
        FP_HELPER_CC(lpebr);
        break;
    case 0x2: /* LTEBR R1,R2 [RRE] */
        FP_HELPER_CC(ltebr);
        break;
    case 0x3: /* LCEBR R1,R2 [RRE] */
        FP_HELPER_CC(lcebr);
        break;
    case 0x4: /* LDEBR R1,R2 [RRE] */
        FP_HELPER(ldebr);
        break;
    case 0x5: /* LXDBR R1,R2 [RRE] */
        FP_HELPER(lxdbr);
        break;
    case 0x9: /* CEBR R1,R2 [RRE] */
        FP_HELPER_CC(cebr);
        break;
    case 0xa: /* AEBR R1,R2 [RRE] */
        FP_HELPER_CC(aebr);
        break;
    case 0xb: /* SEBR R1,R2 [RRE] */
        FP_HELPER_CC(sebr);
        break;
    case 0xd: /* DEBR R1,R2 [RRE] */
        FP_HELPER(debr);
        break;
    case 0x10: /* LPDBR R1,R2 [RRE] */
        FP_HELPER_CC(lpdbr);
        break;
    case 0x12: /* LTDBR R1,R2 [RRE] */
        FP_HELPER_CC(ltdbr);
        break;
    case 0x13: /* LCDBR R1,R2 [RRE] */
        FP_HELPER_CC(lcdbr);
        break;
2083 case 0x15: /* SQDBR R1,R2 [RRE] */
2084 FP_HELPER(sqdbr);
2085 break;
2086 case 0x17: /* MEEBR R1,R2 [RRE] */
2087 FP_HELPER(meebr);
2088 break;
2089 case 0x19: /* CDBR R1,R2 [RRE] */
2090 FP_HELPER_CC(cdbr);
2091 break;
2092 case 0x1a: /* ADBR R1,R2 [RRE] */
2093 FP_HELPER_CC(adbr);
2094 break;
2095 case 0x1b: /* SDBR R1,R2 [RRE] */
2096 FP_HELPER_CC(sdbr);
2097 break;
2098 case 0x1c: /* MDBR R1,R2 [RRE] */
2099 FP_HELPER(mdbr);
2100 break;
2101 case 0x1d: /* DDBR R1,R2 [RRE] */
2102 FP_HELPER(ddbr);
2103 break;
2104 case 0xe: /* MAEBR R1,R3,R2 [RRF] */
2105 case 0x1e: /* MADBR R1,R3,R2 [RRF] */
2106 case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
2107 /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
2108 tmp32_1 = tcg_const_i32(m3);
2109 tmp32_2 = tcg_const_i32(r2);
2110 tmp32_3 = tcg_const_i32(r1);
2111 switch (op) {
2112 case 0xe:
2113 gen_helper_maebr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
2114 break;
2115 case 0x1e:
2116 gen_helper_madbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
2117 break;
2118 case 0x1f:
2119 gen_helper_msdbr(cpu_env, tmp32_1, tmp32_3, tmp32_2);
2120 break;
2121 default:
2122 tcg_abort();
2123 }
2124 tcg_temp_free_i32(tmp32_1);
2125 tcg_temp_free_i32(tmp32_2);
2126 tcg_temp_free_i32(tmp32_3);
2127 break;
2128 case 0x40: /* LPXBR R1,R2 [RRE] */
2129 FP_HELPER_CC(lpxbr);
2130 break;
2131 case 0x42: /* LTXBR R1,R2 [RRE] */
2132 FP_HELPER_CC(ltxbr);
2133 break;
2134 case 0x43: /* LCXBR R1,R2 [RRE] */
2135 FP_HELPER_CC(lcxbr);
2136 break;
2137 case 0x44: /* LEDBR R1,R2 [RRE] */
2138 FP_HELPER(ledbr);
2139 break;
2140 case 0x45: /* LDXBR R1,R2 [RRE] */
2141 FP_HELPER(ldxbr);
2142 break;
2143 case 0x46: /* LEXBR R1,R2 [RRE] */
2144 FP_HELPER(lexbr);
2145 break;
2146 case 0x49: /* CXBR R1,R2 [RRE] */
2147 FP_HELPER_CC(cxbr);
2148 break;
2149 case 0x4a: /* AXBR R1,R2 [RRE] */
2150 FP_HELPER_CC(axbr);
2151 break;
2152 case 0x4b: /* SXBR R1,R2 [RRE] */
2153 FP_HELPER_CC(sxbr);
2154 break;
2155 case 0x4c: /* MXBR R1,R2 [RRE] */
2156 FP_HELPER(mxbr);
2157 break;
2158 case 0x4d: /* DXBR R1,R2 [RRE] */
2159 FP_HELPER(dxbr);
2160 break;
2161 case 0x65: /* LXR R1,R2 [RRE] */
2162 tmp = load_freg(r2);
2163 store_freg(r1, tmp);
2164 tcg_temp_free_i64(tmp);
2165 tmp = load_freg(r2 + 2);
2166 store_freg(r1 + 2, tmp);
2167 tcg_temp_free_i64(tmp);
2168 break;
2169 case 0x74: /* LZER R1 [RRE] */
2170 tmp32_1 = tcg_const_i32(r1);
2171 gen_helper_lzer(cpu_env, tmp32_1);
2172 tcg_temp_free_i32(tmp32_1);
2173 break;
2174 case 0x75: /* LZDR R1 [RRE] */
2175 tmp32_1 = tcg_const_i32(r1);
2176 gen_helper_lzdr(cpu_env, tmp32_1);
2177 tcg_temp_free_i32(tmp32_1);
2178 break;
2179 case 0x76: /* LZXR R1 [RRE] */
2180 tmp32_1 = tcg_const_i32(r1);
2181 gen_helper_lzxr(cpu_env, tmp32_1);
2182 tcg_temp_free_i32(tmp32_1);
2183 break;
2184 case 0x84: /* SFPC R1 [RRE] */
2185 tmp32_1 = load_reg32(r1);
2186 tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
2187 tcg_temp_free_i32(tmp32_1);
2188 break;
2189 case 0x8c: /* EFPC R1 [RRE] */
2190 tmp32_1 = tcg_temp_new_i32();
2191 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
2192 store_reg32(r1, tmp32_1);
2193 tcg_temp_free_i32(tmp32_1);
2194 break;
2195 case 0x94: /* CEFBR R1,R2 [RRE] */
2196 case 0x95: /* CDFBR R1,R2 [RRE] */
2197 case 0x96: /* CXFBR R1,R2 [RRE] */
2198 tmp32_1 = tcg_const_i32(r1);
2199 tmp32_2 = load_reg32(r2);
2200 switch (op) {
2201 case 0x94:
2202 gen_helper_cefbr(cpu_env, tmp32_1, tmp32_2);
2203 break;
2204 case 0x95:
2205 gen_helper_cdfbr(cpu_env, tmp32_1, tmp32_2);
2206 break;
2207 case 0x96:
2208 gen_helper_cxfbr(cpu_env, tmp32_1, tmp32_2);
2209 break;
2210 default:
2211 tcg_abort();
2212 }
2213 tcg_temp_free_i32(tmp32_1);
2214 tcg_temp_free_i32(tmp32_2);
2215 break;
2216 case 0x98: /* CFEBR R1,R2 [RRE] */
2217 case 0x99: /* CFDBR R1,R2 [RRE] */
2218 case 0x9a: /* CFXBR R1,R2 [RRE] */
2219 tmp32_1 = tcg_const_i32(r1);
2220 tmp32_2 = tcg_const_i32(r2);
2221 tmp32_3 = tcg_const_i32(m3);
2222 switch (op) {
2223 case 0x98:
2224 gen_helper_cfebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2225 break;
2226 case 0x99:
2227 gen_helper_cfdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2228 break;
2229 case 0x9a:
2230 gen_helper_cfxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2231 break;
2232 default:
2233 tcg_abort();
2234 }
2235 set_cc_static(s);
2236 tcg_temp_free_i32(tmp32_1);
2237 tcg_temp_free_i32(tmp32_2);
2238 tcg_temp_free_i32(tmp32_3);
2239 break;
2240 case 0xa4: /* CEGBR R1,R2 [RRE] */
2241 case 0xa5: /* CDGBR R1,R2 [RRE] */
2242 tmp32_1 = tcg_const_i32(r1);
2243 tmp = load_reg(r2);
2244 switch (op) {
2245 case 0xa4:
2246 gen_helper_cegbr(cpu_env, tmp32_1, tmp);
2247 break;
2248 case 0xa5:
2249 gen_helper_cdgbr(cpu_env, tmp32_1, tmp);
2250 break;
2251 default:
2252 tcg_abort();
2253 }
2254 tcg_temp_free_i32(tmp32_1);
2255 tcg_temp_free_i64(tmp);
2256 break;
2257 case 0xa6: /* CXGBR R1,R2 [RRE] */
2258 tmp32_1 = tcg_const_i32(r1);
2259 tmp = load_reg(r2);
2260 gen_helper_cxgbr(cpu_env, tmp32_1, tmp);
2261 tcg_temp_free_i32(tmp32_1);
2262 tcg_temp_free_i64(tmp);
2263 break;
2264 case 0xa8: /* CGEBR R1,R2 [RRE] */
2265 tmp32_1 = tcg_const_i32(r1);
2266 tmp32_2 = tcg_const_i32(r2);
2267 tmp32_3 = tcg_const_i32(m3);
2268 gen_helper_cgebr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2269 set_cc_static(s);
2270 tcg_temp_free_i32(tmp32_1);
2271 tcg_temp_free_i32(tmp32_2);
2272 tcg_temp_free_i32(tmp32_3);
2273 break;
2274 case 0xa9: /* CGDBR R1,R2 [RRE] */
2275 tmp32_1 = tcg_const_i32(r1);
2276 tmp32_2 = tcg_const_i32(r2);
2277 tmp32_3 = tcg_const_i32(m3);
2278 gen_helper_cgdbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2279 set_cc_static(s);
2280 tcg_temp_free_i32(tmp32_1);
2281 tcg_temp_free_i32(tmp32_2);
2282 tcg_temp_free_i32(tmp32_3);
2283 break;
2284 case 0xaa: /* CGXBR R1,R2 [RRE] */
2285 tmp32_1 = tcg_const_i32(r1);
2286 tmp32_2 = tcg_const_i32(r2);
2287 tmp32_3 = tcg_const_i32(m3);
2288 gen_helper_cgxbr(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
2289 set_cc_static(s);
2290 tcg_temp_free_i32(tmp32_1);
2291 tcg_temp_free_i32(tmp32_2);
2292 tcg_temp_free_i32(tmp32_3);
2293 break;
2294 default:
2295 LOG_DISAS("illegal b3 operation 0x%x\n", op);
2296 gen_illegal_opcode(s);
2297 break;
2298 }
2299
2300 #undef FP_HELPER_CC
2301 #undef FP_HELPER
2302 }
2303
2304 static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
2305 int r2)
2306 {
2307 TCGv_i64 tmp;
2308 TCGv_i32 tmp32_1;
2309
2310 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
2311 switch (op) {
2312 case 0x17: /* LLGTR R1,R2 [RRE] */
2313 tmp32_1 = load_reg32(r2);
2314 tmp = tcg_temp_new_i64();
2315 tcg_gen_andi_i32(tmp32_1, tmp32_1, 0x7fffffffUL);
2316 tcg_gen_extu_i32_i64(tmp, tmp32_1);
2317 store_reg(r1, tmp);
2318 tcg_temp_free_i32(tmp32_1);
2319 tcg_temp_free_i64(tmp);
2320 break;
2321 case 0x0f: /* LRVGR R1,R2 [RRE] */
2322 tcg_gen_bswap64_i64(regs[r1], regs[r2]);
2323 break;
2324 case 0x1f: /* LRVR R1,R2 [RRE] */
2325 tmp32_1 = load_reg32(r2);
2326 tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
2327 store_reg32(r1, tmp32_1);
2328 tcg_temp_free_i32(tmp32_1);
2329 break;
2330 case 0x83: /* FLOGR R1,R2 [RRE] */
2331 tmp = load_reg(r2);
2332 tmp32_1 = tcg_const_i32(r1);
2333 gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
2334 set_cc_static(s);
2335 tcg_temp_free_i64(tmp);
2336 tcg_temp_free_i32(tmp32_1);
2337 break;
2338 default:
2339 LOG_DISAS("illegal b9 operation 0x%x\n", op);
2340 gen_illegal_opcode(s);
2341 break;
2342 }
2343 }
2344
2345 static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
2346 {
2347 TCGv_i64 tmp, tmp2, tmp3, tmp4;
2348 TCGv_i32 tmp32_1, tmp32_2;
2349 unsigned char opc;
2350 uint64_t insn;
2351 int op, r1, r2, r3, d1, d2, x2, b1, b2, i, i2, r1b;
2352 TCGv_i32 vl;
2353
2354 opc = cpu_ldub_code(env, s->pc);
2355 LOG_DISAS("opc 0x%x\n", opc);
2356
2357 switch (opc) {
2358 #ifndef CONFIG_USER_ONLY
2359 case 0x80: /* SSM D2(B2) [S] */
2360 /* Set System Mask */
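/* The byte at the second-operand address replaces bits 0-7 (the
   system-mask byte) of the PSW mask, as emitted below. */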
2361 check_privileged(s);
2362 insn = ld_code4(env, s->pc);
2363 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2364 tmp = get_address(s, 0, b2, d2);
2365 tmp2 = tcg_temp_new_i64();
2366 tmp3 = tcg_temp_new_i64();
2367 tcg_gen_andi_i64(tmp3, psw_mask, ~0xff00000000000000ULL);
2368 tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
2369 tcg_gen_shli_i64(tmp2, tmp2, 56);
2370 tcg_gen_or_i64(psw_mask, tmp3, tmp2);
2371 tcg_temp_free_i64(tmp);
2372 tcg_temp_free_i64(tmp2);
2373 tcg_temp_free_i64(tmp3);
2374 break;
2375 case 0x82: /* LPSW D2(B2) [S] */
2376 /* Load PSW */
2377 check_privileged(s);
2378 insn = ld_code4(env, s->pc);
2379 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2380 tmp = get_address(s, 0, b2, d2);
2381 tmp2 = tcg_temp_new_i64();
2382 tmp3 = tcg_temp_new_i64();
2383 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
2384 tcg_gen_addi_i64(tmp, tmp, 4);
2385 tcg_gen_qemu_ld32u(tmp3, tmp, get_mem_index(s));
2386 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2387 tcg_gen_shli_i64(tmp2, tmp2, 32);
2388 gen_helper_load_psw(cpu_env, tmp2, tmp3);
2389 tcg_temp_free_i64(tmp);
2390 tcg_temp_free_i64(tmp2);
2391 tcg_temp_free_i64(tmp3);
2392 /* we need to keep cc_op intact */
2393 s->is_jmp = DISAS_JUMP;
2394 break;
2395 case 0x83: /* DIAG R1,R3,D2 [RS] */
2396 /* Diagnose call (KVM hypercall) */
2397 check_privileged(s);
2398 potential_page_fault(s);
2399 insn = ld_code4(env, s->pc);
2400 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2401 tmp32_1 = tcg_const_i32(insn & 0xfff);
2402 tmp2 = load_reg(2);
2403 tmp3 = load_reg(1);
2404 gen_helper_diag(tmp2, cpu_env, tmp32_1, tmp2, tmp3);
2405 store_reg(2, tmp2);
2406 tcg_temp_free_i32(tmp32_1);
2407 tcg_temp_free_i64(tmp2);
2408 tcg_temp_free_i64(tmp3);
2409 break;
2410 #endif
2411 case 0x88: /* SRL R1,D2(B2) [RS] */
2412 case 0x89: /* SLL R1,D2(B2) [RS] */
2413 case 0x8a: /* SRA R1,D2(B2) [RS] */
2414 insn = ld_code4(env, s->pc);
2415 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2416 tmp = get_address(s, 0, b2, d2);
2417 tmp32_1 = load_reg32(r1);
2418 tmp32_2 = tcg_temp_new_i32();
2419 tcg_gen_trunc_i64_i32(tmp32_2, tmp);
2420 tcg_gen_andi_i32(tmp32_2, tmp32_2, 0x3f);
2421 switch (opc) {
2422 case 0x88:
2423 tcg_gen_shr_i32(tmp32_1, tmp32_1, tmp32_2);
2424 break;
2425 case 0x89:
2426 tcg_gen_shl_i32(tmp32_1, tmp32_1, tmp32_2);
2427 break;
2428 case 0x8a:
2429 tcg_gen_sar_i32(tmp32_1, tmp32_1, tmp32_2);
2430 set_cc_s32(s, tmp32_1);
2431 break;
2432 default:
2433 tcg_abort();
2434 }
2435 store_reg32(r1, tmp32_1);
2436 tcg_temp_free_i64(tmp);
2437 tcg_temp_free_i32(tmp32_1);
2438 tcg_temp_free_i32(tmp32_2);
2439 break;
2440 case 0x8c: /* SRDL R1,D2(B2) [RS] */
2441 case 0x8d: /* SLDL R1,D2(B2) [RS] */
2442 case 0x8e: /* SRDA R1,D2(B2) [RS] */
2443 insn = ld_code4(env, s->pc);
2444 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2445 tmp = get_address(s, 0, b2, d2); /* shift */
2446 tmp2 = tcg_temp_new_i64();
2447 tmp32_1 = load_reg32(r1);
2448 tmp32_2 = load_reg32(r1 + 1);
2449 tcg_gen_concat_i32_i64(tmp2, tmp32_2, tmp32_1); /* operand */
2450 switch (opc) {
2451 case 0x8c:
2452 tcg_gen_shr_i64(tmp2, tmp2, tmp);
2453 break;
2454 case 0x8d:
2455 tcg_gen_shl_i64(tmp2, tmp2, tmp);
2456 break;
2457 case 0x8e:
2458 tcg_gen_sar_i64(tmp2, tmp2, tmp);
2459 set_cc_s64(s, tmp2);
2460 break;
2461 }
2462 tcg_gen_shri_i64(tmp, tmp2, 32);
2463 tcg_gen_trunc_i64_i32(tmp32_1, tmp);
2464 store_reg32(r1, tmp32_1);
2465 tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
2466 store_reg32(r1 + 1, tmp32_2);
2467 tcg_temp_free_i64(tmp);
2468 tcg_temp_free_i64(tmp2);
2469 break;
2470 case 0x98: /* LM R1,R3,D2(B2) [RS] */
2471 case 0x90: /* STM R1,R3,D2(B2) [RS] */
2472 insn = ld_code4(env, s->pc);
2473 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2474
2475 tmp = get_address(s, 0, b2, d2);
2476 tmp2 = tcg_temp_new_i64();
2477 tmp3 = tcg_const_i64(4);
2478 tmp4 = tcg_const_i64(0xffffffff00000000ULL);
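/* Walk the registers from r1 to r3, wrapping from 15 back to 0, with
   consecutive 4-byte slots in memory; LM replaces only the low 32 bits
   of each register, STM stores them. */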
2479 for (i = r1;; i = (i + 1) % 16) {
2480 if (opc == 0x98) {
2481 tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
2482 tcg_gen_and_i64(regs[i], regs[i], tmp4);
2483 tcg_gen_or_i64(regs[i], regs[i], tmp2);
2484 } else {
2485 tcg_gen_qemu_st32(regs[i], tmp, get_mem_index(s));
2486 }
2487 if (i == r3) {
2488 break;
2489 }
2490 tcg_gen_add_i64(tmp, tmp, tmp3);
2491 }
2492 tcg_temp_free_i64(tmp);
2493 tcg_temp_free_i64(tmp2);
2494 tcg_temp_free_i64(tmp3);
2495 tcg_temp_free_i64(tmp4);
2496 break;
2497 case 0x92: /* MVI D1(B1),I2 [SI] */
2498 insn = ld_code4(env, s->pc);
2499 tmp = decode_si(s, insn, &i2, &b1, &d1);
2500 tmp2 = tcg_const_i64(i2);
2501 tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
2502 tcg_temp_free_i64(tmp);
2503 tcg_temp_free_i64(tmp2);
2504 break;
2505 case 0x94: /* NI D1(B1),I2 [SI] */
2506 case 0x96: /* OI D1(B1),I2 [SI] */
2507 case 0x97: /* XI D1(B1),I2 [SI] */
2508 insn = ld_code4(env, s->pc);
2509 tmp = decode_si(s, insn, &i2, &b1, &d1);
2510 tmp2 = tcg_temp_new_i64();
2511 tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
2512 switch (opc) {
2513 case 0x94:
2514 tcg_gen_andi_i64(tmp2, tmp2, i2);
2515 break;
2516 case 0x96:
2517 tcg_gen_ori_i64(tmp2, tmp2, i2);
2518 break;
2519 case 0x97:
2520 tcg_gen_xori_i64(tmp2, tmp2, i2);
2521 break;
2522 default:
2523 tcg_abort();
2524 }
2525 tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
2526 set_cc_nz_u64(s, tmp2);
2527 tcg_temp_free_i64(tmp);
2528 tcg_temp_free_i64(tmp2);
2529 break;
2530 case 0x9a: /* LAM R1,R3,D2(B2) [RS] */
2531 insn = ld_code4(env, s->pc);
2532 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2533 tmp = get_address(s, 0, b2, d2);
2534 tmp32_1 = tcg_const_i32(r1);
2535 tmp32_2 = tcg_const_i32(r3);
2536 potential_page_fault(s);
2537 gen_helper_lam(cpu_env, tmp32_1, tmp, tmp32_2);
2538 tcg_temp_free_i64(tmp);
2539 tcg_temp_free_i32(tmp32_1);
2540 tcg_temp_free_i32(tmp32_2);
2541 break;
2542 case 0x9b: /* STAM R1,R3,D2(B2) [RS] */
2543 insn = ld_code4(env, s->pc);
2544 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2545 tmp = get_address(s, 0, b2, d2);
2546 tmp32_1 = tcg_const_i32(r1);
2547 tmp32_2 = tcg_const_i32(r3);
2548 potential_page_fault(s);
2549 gen_helper_stam(cpu_env, tmp32_1, tmp, tmp32_2);
2550 tcg_temp_free_i64(tmp);
2551 tcg_temp_free_i32(tmp32_1);
2552 tcg_temp_free_i32(tmp32_2);
2553 break;
2554 case 0xa8: /* MVCLE R1,R3,D2(B2) [RS] */
2555 insn = ld_code4(env, s->pc);
2556 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2557 tmp = get_address(s, 0, b2, d2);
2558 tmp32_1 = tcg_const_i32(r1);
2559 tmp32_2 = tcg_const_i32(r3);
2560 potential_page_fault(s);
2561 gen_helper_mvcle(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
2562 set_cc_static(s);
2563 tcg_temp_free_i64(tmp);
2564 tcg_temp_free_i32(tmp32_1);
2565 tcg_temp_free_i32(tmp32_2);
2566 break;
2567 case 0xa9: /* CLCLE R1,R3,D2(B2) [RS] */
2568 insn = ld_code4(env, s->pc);
2569 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2570 tmp = get_address(s, 0, b2, d2);
2571 tmp32_1 = tcg_const_i32(r1);
2572 tmp32_2 = tcg_const_i32(r3);
2573 potential_page_fault(s);
2574 gen_helper_clcle(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
2575 set_cc_static(s);
2576 tcg_temp_free_i64(tmp);
2577 tcg_temp_free_i32(tmp32_1);
2578 tcg_temp_free_i32(tmp32_2);
2579 break;
2580 #ifndef CONFIG_USER_ONLY
2581 case 0xac: /* STNSM D1(B1),I2 [SI] */
2582 case 0xad: /* STOSM D1(B1),I2 [SI] */
2583 check_privileged(s);
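/* Store the current system-mask byte (PSW bits 0-7), then AND (STNSM)
   or OR (STOSM) the immediate into it. */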
2584 insn = ld_code4(env, s->pc);
2585 tmp = decode_si(s, insn, &i2, &b1, &d1);
2586 tmp2 = tcg_temp_new_i64();
2587 tcg_gen_shri_i64(tmp2, psw_mask, 56);
2588 tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
2589 if (opc == 0xac) {
2590 tcg_gen_andi_i64(psw_mask, psw_mask,
2591 ((uint64_t)i2 << 56) | 0x00ffffffffffffffULL);
2592 } else {
2593 tcg_gen_ori_i64(psw_mask, psw_mask, (uint64_t)i2 << 56);
2594 }
2595 tcg_temp_free_i64(tmp);
2596 tcg_temp_free_i64(tmp2);
2597 break;
2598 case 0xae: /* SIGP R1,R3,D2(B2) [RS] */
2599 check_privileged(s);
2600 insn = ld_code4(env, s->pc);
2601 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2602 tmp = get_address(s, 0, b2, d2);
2603 tmp2 = load_reg(r3);
2604 tmp32_1 = tcg_const_i32(r1);
2605 potential_page_fault(s);
2606 gen_helper_sigp(cc_op, cpu_env, tmp, tmp32_1, tmp2);
2607 set_cc_static(s);
2608 tcg_temp_free_i64(tmp);
2609 tcg_temp_free_i64(tmp2);
2610 tcg_temp_free_i32(tmp32_1);
2611 break;
2612 case 0xb1: /* LRA R1,D2(X2,B2) [RX] */
2613 check_privileged(s);
2614 insn = ld_code4(env, s->pc);
2615 tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
2616 tmp32_1 = tcg_const_i32(r1);
2617 potential_page_fault(s);
2618 gen_helper_lra(cc_op, cpu_env, tmp, tmp32_1);
2619 set_cc_static(s);
2620 tcg_temp_free_i64(tmp);
2621 tcg_temp_free_i32(tmp32_1);
2622 break;
2623 #endif
2624 case 0xb2:
2625 insn = ld_code4(env, s->pc);
2626 op = (insn >> 16) & 0xff;
2627 switch (op) {
2628 case 0x9c: /* STFPC D2(B2) [S] */
2629 d2 = insn & 0xfff;
2630 b2 = (insn >> 12) & 0xf;
2631 tmp32_1 = tcg_temp_new_i32();
2632 tmp = tcg_temp_new_i64();
2633 tmp2 = get_address(s, 0, b2, d2);
2634 tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
2635 tcg_gen_extu_i32_i64(tmp, tmp32_1);
2636 tcg_gen_qemu_st32(tmp, tmp2, get_mem_index(s));
2637 tcg_temp_free_i32(tmp32_1);
2638 tcg_temp_free_i64(tmp);
2639 tcg_temp_free_i64(tmp2);
2640 break;
2641 default:
2642 disas_b2(env, s, op, insn);
2643 break;
2644 }
2645 break;
2646 case 0xb3:
2647 insn = ld_code4(env, s->pc);
2648 op = (insn >> 16) & 0xff;
2649 r3 = (insn >> 12) & 0xf; /* aka m3 */
2650 r1 = (insn >> 4) & 0xf;
2651 r2 = insn & 0xf;
2652 disas_b3(env, s, op, r3, r1, r2);
2653 break;
2654 #ifndef CONFIG_USER_ONLY
2655 case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */
2656 /* Store Control */
2657 check_privileged(s);
2658 insn = ld_code4(env, s->pc);
2659 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2660 tmp = get_address(s, 0, b2, d2);
2661 tmp32_1 = tcg_const_i32(r1);
2662 tmp32_2 = tcg_const_i32(r3);
2663 potential_page_fault(s);
2664 gen_helper_stctl(cpu_env, tmp32_1, tmp, tmp32_2);
2665 tcg_temp_free_i64(tmp);
2666 tcg_temp_free_i32(tmp32_1);
2667 tcg_temp_free_i32(tmp32_2);
2668 break;
2669 case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */
2670 /* Load Control */
2671 check_privileged(s);
2672 insn = ld_code4(env, s->pc);
2673 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2674 tmp = get_address(s, 0, b2, d2);
2675 tmp32_1 = tcg_const_i32(r1);
2676 tmp32_2 = tcg_const_i32(r3);
2677 potential_page_fault(s);
2678 gen_helper_lctl(cpu_env, tmp32_1, tmp, tmp32_2);
2679 tcg_temp_free_i64(tmp);
2680 tcg_temp_free_i32(tmp32_1);
2681 tcg_temp_free_i32(tmp32_2);
2682 break;
2683 #endif
2684 case 0xb9:
2685 insn = ld_code4(env, s->pc);
2686 r1 = (insn >> 4) & 0xf;
2687 r2 = insn & 0xf;
2688 op = (insn >> 16) & 0xff;
2689 disas_b9(env, s, op, r1, r2);
2690 break;
2691 case 0xba: /* CS R1,R3,D2(B2) [RS] */
2692 insn = ld_code4(env, s->pc);
2693 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2694 tmp = get_address(s, 0, b2, d2);
2695 tmp32_1 = tcg_const_i32(r1);
2696 tmp32_2 = tcg_const_i32(r3);
2697 potential_page_fault(s);
2698 gen_helper_cs(cc_op, cpu_env, tmp32_1, tmp, tmp32_2);
2699 set_cc_static(s);
2700 tcg_temp_free_i64(tmp);
2701 tcg_temp_free_i32(tmp32_1);
2702 tcg_temp_free_i32(tmp32_2);
2703 break;
2704 case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
2705 insn = ld_code4(env, s->pc);
2706 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2707 tmp = get_address(s, 0, b2, d2);
2708 tmp32_1 = load_reg32(r1);
2709 tmp32_2 = tcg_const_i32(r3);
2710 potential_page_fault(s);
2711 gen_helper_clm(cc_op, cpu_env, tmp32_1, tmp32_2, tmp);
2712 set_cc_static(s);
2713 tcg_temp_free_i64(tmp);
2714 tcg_temp_free_i32(tmp32_1);
2715 tcg_temp_free_i32(tmp32_2);
2716 break;
2717 case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
2718 insn = ld_code4(env, s->pc);
2719 decode_rs(s, insn, &r1, &r3, &b2, &d2);
2720 tmp = get_address(s, 0, b2, d2);
2721 tmp32_1 = load_reg32(r1);
2722 tmp32_2 = tcg_const_i32(r3);
2723 potential_page_fault(s);
2724 gen_helper_stcm(cpu_env, tmp32_1, tmp32_2, tmp);
2725 tcg_temp_free_i64(tmp);
2726 tcg_temp_free_i32(tmp32_1);
2727 tcg_temp_free_i32(tmp32_2);
2728 break;
2729 case 0xd2: /* MVC D1(L,B1),D2(B2) [SS] */
2730 case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */
2731 case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */
2732 case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */
2733 case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */
2734 case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */
2735 case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */
2736 insn = ld_code6(env, s->pc);
2737 vl = tcg_const_i32((insn >> 32) & 0xff);
2738 b1 = (insn >> 28) & 0xf;
2739 b2 = (insn >> 12) & 0xf;
2740 d1 = (insn >> 16) & 0xfff;
2741 d2 = insn & 0xfff;
2742 tmp = get_address(s, 0, b1, d1);
2743 tmp2 = get_address(s, 0, b2, d2);
2744 switch (opc) {
2745 case 0xd2:
2746 gen_op_mvc(s, (insn >> 32) & 0xff, tmp, tmp2);
2747 break;
2748 case 0xd4:
2749 potential_page_fault(s);
2750 gen_helper_nc(cc_op, cpu_env, vl, tmp, tmp2);
2751 set_cc_static(s);
2752 break;
2753 case 0xd5:
2754 gen_op_clc(s, (insn >> 32) & 0xff, tmp, tmp2);
2755 break;
2756 case 0xd6:
2757 potential_page_fault(s);
2758 gen_helper_oc(cc_op, cpu_env, vl, tmp, tmp2);
2759 set_cc_static(s);
2760 break;
2761 case 0xd7:
2762 potential_page_fault(s);
2763 gen_helper_xc(cc_op, cpu_env, vl, tmp, tmp2);
2764 set_cc_static(s);
2765 break;
2766 case 0xdc:
2767 potential_page_fault(s);
2768 gen_helper_tr(cpu_env, vl, tmp, tmp2);
2769 set_cc_static(s);
2770 break;
2771 case 0xf3:
2772 potential_page_fault(s);
2773 gen_helper_unpk(cpu_env, vl, tmp, tmp2);
2774 break;
2775 default:
2776 tcg_abort();
2777 }
2778 tcg_temp_free_i64(tmp);
2779 tcg_temp_free_i64(tmp2);
2780 break;
2781 #ifndef CONFIG_USER_ONLY
2782 case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */
2783 case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */
2784 check_privileged(s);
2785 potential_page_fault(s);
2786 insn = ld_code6(env, s->pc);
2787 r1 = (insn >> 36) & 0xf;
2788 r3 = (insn >> 32) & 0xf;
2789 b1 = (insn >> 28) & 0xf;
2790 d1 = (insn >> 16) & 0xfff;
2791 b2 = (insn >> 12) & 0xf;
2792 d2 = insn & 0xfff;
2793 tmp = load_reg(r1);
2794 /* XXX key in r3 */
2795 tmp2 = get_address(s, 0, b1, d1);
2796 tmp3 = get_address(s, 0, b2, d2);
2797 if (opc == 0xda) {
2798 gen_helper_mvcp(cc_op, cpu_env, tmp, tmp2, tmp3);
2799 } else {
2800 gen_helper_mvcs(cc_op, cpu_env, tmp, tmp2, tmp3);
2801 }
2802 set_cc_static(s);
2803 tcg_temp_free_i64(tmp);
2804 tcg_temp_free_i64(tmp2);
2805 tcg_temp_free_i64(tmp3);
2806 break;
2807 #endif
2808 case 0xe3:
2809 insn = ld_code6(env, s->pc);
2810 debug_insn(insn);
2811 op = insn & 0xff;
2812 r1 = (insn >> 36) & 0xf;
2813 x2 = (insn >> 32) & 0xf;
2814 b2 = (insn >> 28) & 0xf;
2815 d2 = ((int)((((insn >> 16) & 0xfff)
2816 | ((insn << 4) & 0xff000)) << 12)) >> 12; /* sign-extend the 20-bit displacement */
2817 disas_e3(env, s, op, r1, x2, b2, d2);
2818 break;
2819 #ifndef CONFIG_USER_ONLY
2820 case 0xe5:
2821 /* Test Protection */
2822 check_privileged(s);
2823 insn = ld_code6(env, s->pc);
2824 debug_insn(insn);
2825 disas_e5(env, s, insn);
2826 break;
2827 #endif
2828 case 0xeb:
2829 insn = ld_code6(env, s->pc);
2830 debug_insn(insn);
2831 op = insn & 0xff;
2832 r1 = (insn >> 36) & 0xf;
2833 r3 = (insn >> 32) & 0xf;
2834 b2 = (insn >> 28) & 0xf;
2835 d2 = ((int)((((insn >> 16) & 0xfff)
2836 | ((insn << 4) & 0xff000)) << 12)) >> 12; /* sign-extend the 20-bit displacement */
2837 disas_eb(env, s, op, r1, r3, b2, d2);
2838 break;
2839 case 0xed:
2840 insn = ld_code6(env, s->pc);
2841 debug_insn(insn);
2842 op = insn & 0xff;
2843 r1 = (insn >> 36) & 0xf;
2844 x2 = (insn >> 32) & 0xf;
2845 b2 = (insn >> 28) & 0xf;
2846 d2 = (short)((insn >> 16) & 0xfff);
2847 r1b = (insn >> 12) & 0xf;
2848 disas_ed(env, s, op, r1, x2, b2, d2, r1b);
2849 break;
2850 default:
2851 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
2852 gen_illegal_opcode(s);
2853 break;
2854 }
2855 }
2856
2857 /* ====================================================================== */
2858 /* Define the insn format enumeration. */
2859 #define F0(N) FMT_##N,
2860 #define F1(N, X1) F0(N)
2861 #define F2(N, X1, X2) F0(N)
2862 #define F3(N, X1, X2, X3) F0(N)
2863 #define F4(N, X1, X2, X3, X4) F0(N)
2864 #define F5(N, X1, X2, X3, X4, X5) F0(N)
2865
2866 typedef enum {
2867 #include "insn-format.def"
2868 } DisasFormat;
2869
2870 #undef F0
2871 #undef F1
2872 #undef F2
2873 #undef F3
2874 #undef F4
2875 #undef F5
2876
2877 /* Define a structure to hold the decoded fields. We'll store each inside
2878 an array indexed by an enum. In order to conserve memory, we'll arrange
2879 for fields that do not exist at the same time to overlap, thus the "C"
2880 for compact. For checking purposes there is an "O" for original index
2881 as well that will be applied to availability bitmaps. */
2882
2883 enum DisasFieldIndexO {
2884 FLD_O_r1,
2885 FLD_O_r2,
2886 FLD_O_r3,
2887 FLD_O_m1,
2888 FLD_O_m3,
2889 FLD_O_m4,
2890 FLD_O_b1,
2891 FLD_O_b2,
2892 FLD_O_b4,
2893 FLD_O_d1,
2894 FLD_O_d2,
2895 FLD_O_d4,
2896 FLD_O_x2,
2897 FLD_O_l1,
2898 FLD_O_l2,
2899 FLD_O_i1,
2900 FLD_O_i2,
2901 FLD_O_i3,
2902 FLD_O_i4,
2903 FLD_O_i5
2904 };
2905
2906 enum DisasFieldIndexC {
2907 FLD_C_r1 = 0,
2908 FLD_C_m1 = 0,
2909 FLD_C_b1 = 0,
2910 FLD_C_i1 = 0,
2911
2912 FLD_C_r2 = 1,
2913 FLD_C_b2 = 1,
2914 FLD_C_i2 = 1,
2915
2916 FLD_C_r3 = 2,
2917 FLD_C_m3 = 2,
2918 FLD_C_i3 = 2,
2919
2920 FLD_C_m4 = 3,
2921 FLD_C_b4 = 3,
2922 FLD_C_i4 = 3,
2923 FLD_C_l1 = 3,
2924
2925 FLD_C_i5 = 4,
2926 FLD_C_d1 = 4,
2927
2928 FLD_C_d2 = 5,
2929
2930 FLD_C_d4 = 6,
2931 FLD_C_x2 = 6,
2932 FLD_C_l2 = 6,
2933
2934 NUM_C_FIELD = 7
2935 };
2936
2937 struct DisasFields {
2938 unsigned op:8;
2939 unsigned op2:8;
2940 unsigned presentC:16;
2941 unsigned int presentO;
2942 int c[NUM_C_FIELD];
2943 };
2944
2945 /* This is the way fields are to be accessed out of DisasFields. */
2946 #define have_field(S, F) have_field1((S), FLD_O_##F)
2947 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
2948
2949 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
2950 {
2951 return (f->presentO >> c) & 1;
2952 }
2953
2954 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
2955 enum DisasFieldIndexC c)
2956 {
2957 assert(have_field1(f, o));
2958 return f->c[c];
2959 }
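/* For example, get_field(f, m3) expands to get_field1(f, FLD_O_m3, FLD_C_m3):
   it asserts that an m3 field was actually decoded for this insn's format,
   then reads compact slot 2, which m3 shares with r3 and i3 because no
   format carries more than one of them. */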
2960
2961 /* Describe the layout of each field in each format. */
2962 typedef struct DisasField {
2963 unsigned int beg:8;
2964 unsigned int size:8;
2965 unsigned int type:2;
2966 unsigned int indexC:6;
2967 enum DisasFieldIndexO indexO:8;
2968 } DisasField;
2969
2970 typedef struct DisasFormatInfo {
2971 DisasField op[NUM_C_FIELD];
2972 } DisasFormatInfo;
2973
2974 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
2975 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
2976 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2977 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
2978 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2979 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2980 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
2981 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2982 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2983 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2984 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2985 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2986 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
2987 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
2988
2989 #define F0(N) { { } },
2990 #define F1(N, X1) { { X1 } },
2991 #define F2(N, X1, X2) { { X1, X2 } },
2992 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
2993 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
2994 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
2995
2996 static const DisasFormatInfo format_info[] = {
2997 #include "insn-format.def"
2998 };
2999
3000 #undef F0
3001 #undef F1
3002 #undef F2
3003 #undef F3
3004 #undef F4
3005 #undef F5
3006 #undef R
3007 #undef M
3008 #undef BD
3009 #undef BXD
3010 #undef BDL
3011 #undef BXDL
3012 #undef I
3013 #undef L
3014
3015 /* Generally, we'll extract operands into this structure, operate upon
3016 them, and store them back. See the "in1", "in2", "prep", "wout" sets
3017 of routines below for more details. */
3018 typedef struct {
3019 bool g_out, g_out2, g_in1, g_in2; /* set when the value aliases a TCG global */
3020 TCGv_i64 out, out2, in1, in2;
3021 TCGv_i64 addr1;
3022 } DisasOps;
3023
3024 /* Return values from translate_one, indicating the state of the TB. */
3025 typedef enum {
3026 /* Continue the TB. */
3027 NO_EXIT,
3028 /* We have emitted one or more goto_tb. No fixup required. */
3029 EXIT_GOTO_TB,
3030 /* We are not using a goto_tb (for whatever reason), but have updated
3031 the PC (for whatever reason), so there's no need to do it again on
3032 exiting the TB. */
3033 EXIT_PC_UPDATED,
3034 /* We are exiting the TB, but have neither emitted a goto_tb, nor
3035 updated the PC for the next instruction to be executed. */
3036 EXIT_PC_STALE,
3037 /* We are ending the TB with a noreturn function call, e.g. longjmp.
3038 No following code will be executed. */
3039 EXIT_NORETURN,
3040 } ExitStatus;
3041
3042 typedef enum DisasFacility {
3043 FAC_Z, /* zarch (default) */
3044 FAC_CASS, /* compare and swap and store */
3045 FAC_CASS2, /* compare and swap and store 2 */
3046 FAC_DFP, /* decimal floating point */
3047 FAC_DFPR, /* decimal floating point rounding */
3048 FAC_DO, /* distinct operands */
3049 FAC_EE, /* execute extensions */
3050 FAC_EI, /* extended immediate */
3051 FAC_FPE, /* floating point extension */
3052 FAC_FPSSH, /* floating point support sign handling */
3053 FAC_FPRGR, /* FPR-GR transfer */
3054 FAC_GIE, /* general instructions extension */
3055 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
3056 FAC_HW, /* high-word */
3057 FAC_IEEEE_SIM, /* IEEE exception simulation */
3058 FAC_LOC, /* load/store on condition */
3059 FAC_LD, /* long displacement */
3060 FAC_PC, /* population count */
3061 FAC_SCF, /* store clock fast */
3062 FAC_SFLE, /* store facility list extended */
3063 } DisasFacility;
3064
3065 struct DisasInsn {
3066 unsigned opc:16;
3067 DisasFormat fmt:6;
3068 DisasFacility fac:6;
3069
3070 const char *name;
3071
3072 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
3073 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
3074 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
3075 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
3076 void (*help_cout)(DisasContext *, DisasOps *);
3077 ExitStatus (*help_op)(DisasContext *, DisasOps *);
3078
3079 uint64_t data;
3080 };
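/* Purely for illustration (the real table is built elsewhere via macros),
   an entry for 64-bit ADD (AGR) would combine the generators along the
   lines of:

   { .opc = 0xb908, .fmt = FMT_RRE, .fac = FAC_Z, .name = "agr",
     .help_in1 = in1_r1, .help_in2 = in2_r2, .help_prep = prep_r1,
     .help_op = op_add, .help_cout = cout_adds64 }
*/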
3081
3082 /* ====================================================================== */
3083 /* Miscellaneous helpers, used by several operations. */
3084
3085 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
3086 {
3087 if (dest == s->next_pc) {
3088 return NO_EXIT;
3089 }
3090 if (use_goto_tb(s, dest)) {
3091 gen_update_cc_op(s);
3092 tcg_gen_goto_tb(0);
3093 tcg_gen_movi_i64(psw_addr, dest);
3094 tcg_gen_exit_tb((tcg_target_long)s->tb);
3095 return EXIT_GOTO_TB;
3096 } else {
3097 tcg_gen_movi_i64(psw_addr, dest);
3098 return EXIT_PC_UPDATED;
3099 }
3100 }
3101
3102 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
3103 bool is_imm, int imm, TCGv_i64 cdest)
3104 {
3105 ExitStatus ret;
3106 uint64_t dest = s->pc + 2 * imm;
3107 int lab;
3108
3109 /* Take care of the special cases first. */
3110 if (c->cond == TCG_COND_NEVER) {
3111 ret = NO_EXIT;
3112 goto egress;
3113 }
3114 if (is_imm) {
3115 if (dest == s->next_pc) {
3116 /* Branch to next. */
3117 ret = NO_EXIT;
3118 goto egress;
3119 }
3120 if (c->cond == TCG_COND_ALWAYS) {
3121 ret = help_goto_direct(s, dest);
3122 goto egress;
3123 }
3124 } else {
3125 if (TCGV_IS_UNUSED_I64(cdest)) {
3126 /* E.g. bcr %r0 -> no branch. */
3127 ret = NO_EXIT;
3128 goto egress;
3129 }
3130 if (c->cond == TCG_COND_ALWAYS) {
3131 tcg_gen_mov_i64(psw_addr, cdest);
3132 ret = EXIT_PC_UPDATED;
3133 goto egress;
3134 }
3135 }
3136
3137 if (use_goto_tb(s, s->next_pc)) {
3138 if (is_imm && use_goto_tb(s, dest)) {
3139 /* Both exits can use goto_tb. */
3140 gen_update_cc_op(s);
3141
3142 lab = gen_new_label();
3143 if (c->is_64) {
3144 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
3145 } else {
3146 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
3147 }
3148
3149 /* Branch not taken. */
3150 tcg_gen_goto_tb(0);
3151 tcg_gen_movi_i64(psw_addr, s->next_pc);
3152 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
3153
3154 /* Branch taken. */
3155 gen_set_label(lab);
3156 tcg_gen_goto_tb(1);
3157 tcg_gen_movi_i64(psw_addr, dest);
3158 tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
3159
3160 ret = EXIT_GOTO_TB;
3161 } else {
3162 /* Fallthru can use goto_tb, but taken branch cannot. */
3163 /* Store taken branch destination before the brcond. This
3164 avoids having to allocate a new local temp to hold it.
3165 We'll overwrite this in the not taken case anyway. */
3166 if (!is_imm) {
3167 tcg_gen_mov_i64(psw_addr, cdest);
3168 }
3169
3170 lab = gen_new_label();
3171 if (c->is_64) {
3172 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
3173 } else {
3174 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
3175 }
3176
3177 /* Branch not taken. */
3178 gen_update_cc_op(s);
3179 tcg_gen_goto_tb(0);
3180 tcg_gen_movi_i64(psw_addr, s->next_pc);
3181 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
3182
3183 gen_set_label(lab);
3184 if (is_imm) {
3185 tcg_gen_movi_i64(psw_addr, dest);
3186 }
3187 ret = EXIT_PC_UPDATED;
3188 }
3189 } else {
3190 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
3191 Most commonly we're single-stepping or some other condition that
3192 disables all use of goto_tb. Just update the PC and exit. */
3193
3194 TCGv_i64 next = tcg_const_i64(s->next_pc);
3195 if (is_imm) {
3196 cdest = tcg_const_i64(dest);
3197 }
3198
3199 if (c->is_64) {
3200 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
3201 cdest, next);
3202 } else {
3203 TCGv_i32 t0 = tcg_temp_new_i32();
3204 TCGv_i64 t1 = tcg_temp_new_i64();
3205 TCGv_i64 z = tcg_const_i64(0);
3206 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
3207 tcg_gen_extu_i32_i64(t1, t0);
3208 tcg_temp_free_i32(t0);
3209 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
3210 tcg_temp_free_i64(t1);
3211 tcg_temp_free_i64(z);
3212 }
3213
3214 if (is_imm) {
3215 tcg_temp_free_i64(cdest);
3216 }
3217 tcg_temp_free_i64(next);
3218
3219 ret = EXIT_PC_UPDATED;
3220 }
3221
3222 egress:
3223 free_compare(c);
3224 return ret;
3225 }
3226
3227 /* ====================================================================== */
3228 /* The operations. These perform the bulk of the work for any insn,
3229 usually after the operands have been loaded and output initialized. */
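/* As a rough sketch of how these fit together (the driver is
   translate_one, and NULL callbacks are simply skipped):

       insn->help_in1(s, f, &o);     load the first operand
       insn->help_in2(s, f, &o);     load the second operand
       insn->help_prep(s, f, &o);    set up the output destination(s)
       ret = insn->help_op(s, &o);   emit the TCG ops for the insn itself
       insn->help_wout(s, f, &o);    write results to registers/memory
       insn->help_cout(s, &o);       record data for the condition code
*/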
3230
3231 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
3232 {
3233 gen_helper_abs_i64(o->out, o->in2);
3234 return NO_EXIT;
3235 }
3236
3237 static ExitStatus op_add(DisasContext *s, DisasOps *o)
3238 {
3239 tcg_gen_add_i64(o->out, o->in1, o->in2);
3240 return NO_EXIT;
3241 }
3242
3243 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
3244 {
3245 TCGv_i64 cc;
3246
3247 tcg_gen_add_i64(o->out, o->in1, o->in2);
3248
3249 /* XXX possible optimization point */
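/* The carry out of the previous addition is bit 1 of the CC
   (CC is 2 or 3 iff there was a carry), so CC >> 1 is the carry-in. */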
3250 gen_op_calc_cc(s);
3251 cc = tcg_temp_new_i64();
3252 tcg_gen_extu_i32_i64(cc, cc_op);
3253 tcg_gen_shri_i64(cc, cc, 1);
3254
3255 tcg_gen_add_i64(o->out, o->out, cc);
3256 tcg_temp_free_i64(cc);
3257 return NO_EXIT;
3258 }
3259
3260 static ExitStatus op_and(DisasContext *s, DisasOps *o)
3261 {
3262 tcg_gen_and_i64(o->out, o->in1, o->in2);
3263 return NO_EXIT;
3264 }
3265
3266 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
3267 {
3268 int shift = s->insn->data & 0xff;
3269 int size = s->insn->data >> 8;
3270 uint64_t mask = ((1ull << size) - 1) << shift;
3271
3272 assert(!o->g_in2);
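/* Shift the immediate into place and set every bit outside its field,
   so the AND below modifies only the selected bits of in1. */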
3273 tcg_gen_shli_i64(o->in2, o->in2, shift);
3274 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3275 tcg_gen_and_i64(o->out, o->in1, o->in2);
3276
3277 /* Produce the CC from only the bits manipulated. */
3278 tcg_gen_andi_i64(cc_dst, o->out, mask);
3279 set_cc_nz_u64(s, cc_dst);
3280 return NO_EXIT;
3281 }
3282
3283 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
3284 {
3285 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
3286 if (!TCGV_IS_UNUSED_I64(o->in2)) {
3287 tcg_gen_mov_i64(psw_addr, o->in2);
3288 return EXIT_PC_UPDATED;
3289 } else {
3290 return NO_EXIT;
3291 }
3292 }
3293
3294 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
3295 {
3296 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
3297 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
3298 }
3299
3300 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
3301 {
3302 int m1 = get_field(s->fields, m1);
3303 bool is_imm = have_field(s->fields, i2);
3304 int imm = is_imm ? get_field(s->fields, i2) : 0;
3305 DisasCompare c;
3306
3307 disas_jcc(s, &c, m1);
3308 return help_branch(s, &c, is_imm, imm, o->in2);
3309 }
3310
3311 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
3312 {
3313 int r1 = get_field(s->fields, r1);
3314 bool is_imm = have_field(s->fields, i2);
3315 int imm = is_imm ? get_field(s->fields, i2) : 0;
3316 DisasCompare c;
3317 TCGv_i64 t;
3318
3319 c.cond = TCG_COND_NE;
3320 c.is_64 = false;
3321 c.g1 = false;
3322 c.g2 = false;
3323
3324 t = tcg_temp_new_i64();
3325 tcg_gen_subi_i64(t, regs[r1], 1);
3326 store_reg32_i64(r1, t);
3327 c.u.s32.a = tcg_temp_new_i32();
3328 c.u.s32.b = tcg_const_i32(0);
3329 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
3330 tcg_temp_free_i64(t);
3331
3332 return help_branch(s, &c, is_imm, imm, o->in2);
3333 }
3334
3335 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
3336 {
3337 int r1 = get_field(s->fields, r1);
3338 bool is_imm = have_field(s->fields, i2);
3339 int imm = is_imm ? get_field(s->fields, i2) : 0;
3340 DisasCompare c;
3341
3342 c.cond = TCG_COND_NE;
3343 c.is_64 = true;
3344 c.g1 = true;
3345 c.g2 = false;
3346
3347 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
3348 c.u.s64.a = regs[r1];
3349 c.u.s64.b = tcg_const_i64(0);
3350
3351 return help_branch(s, &c, is_imm, imm, o->in2);
3352 }
3353
3354 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
3355 {
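/* CONVERT TO DECIMAL: the 32-bit binary value in the first operand is
   converted to an 8-byte packed-decimal number and stored at the
   second-operand address. */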
3356 TCGv_i64 t1 = tcg_temp_new_i64();
3357 TCGv_i32 t2 = tcg_temp_new_i32();
3358 tcg_gen_trunc_i64_i32(t2, o->in1);
3359 gen_helper_cvd(t1, t2);
3360 tcg_temp_free_i32(t2);
3361 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
3362 tcg_temp_free_i64(t1);
3363 return NO_EXIT;
3364 }
3365
3366 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
3367 {
3368 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
3369 return_low128(o->out);
3370 return NO_EXIT;
3371 }
3372
3373 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
3374 {
3375 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
3376 return_low128(o->out);
3377 return NO_EXIT;
3378 }
3379
3380 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
3381 {
3382 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
3383 return_low128(o->out);
3384 return NO_EXIT;
3385 }
3386
3387 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
3388 {
3389 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
3390 return_low128(o->out);
3391 return NO_EXIT;
3392 }
3393
3394 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
3395 {
3396 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
3397 tb->flags, (ab)use the tb->cs_base field as the address of
3398 the template in memory, and grab 8 bits of tb->flags/cflags for
3399 the contents of the register. We would then recognize all this
3400 in gen_intermediate_code_internal, generating code for exactly
3401 one instruction. This new TB then gets executed normally.
3402
3403 On the other hand, this seems to be mostly used for modifying
3404 MVC inside of memcpy, which needs a helper call anyway. So
3405 perhaps this doesn't bear thinking about any further. */
3406
3407 TCGv_i64 tmp;
3408
3409 update_psw_addr(s);
3410 gen_op_calc_cc(s);
3411
3412 tmp = tcg_const_i64(s->next_pc);
3413 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
3414 tcg_temp_free_i64(tmp);
3415
3416 set_cc_static(s);
3417 return NO_EXIT;
3418 }
3419
3420 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
3421 {
3422 int m3 = get_field(s->fields, m3);
3423 int pos, len, base = s->insn->data;
3424 TCGv_i64 tmp = tcg_temp_new_i64();
3425 uint64_t ccm;
3426
3427 switch (m3) {
3428 case 0xf:
3429 /* Effectively a 32-bit load. */
3430 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
3431 len = 32;
3432 goto one_insert;
3433
3434 case 0xc:
3435 case 0x6:
3436 case 0x3:
3437 /* Effectively a 16-bit load. */
3438 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
3439 len = 16;
3440 goto one_insert;
3441
3442 case 0x8:
3443 case 0x4:
3444 case 0x2:
3445 case 0x1:
3446 /* Effectively an 8-bit load. */
3447 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
3448 len = 8;
3449 goto one_insert;
3450
3451 one_insert:
3452 pos = base + ctz32(m3) * 8;
3453 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
3454 ccm = ((1ull << len) - 1) << pos;
3455 break;
3456
3457 default:
3458 /* This is going to be a sequence of loads and inserts. */
3459 pos = base + 32 - 8;
3460 ccm = 0;
3461 while (m3) {
3462 if (m3 & 0x8) {
3463 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
3464 tcg_gen_addi_i64(o->in2, o->in2, 1);
3465 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
3466 ccm |= 0xffull << pos;
3467 }
3468 m3 = (m3 << 1) & 0xf;
3469 pos -= 8;
3470 }
3471 break;
3472 }
3473
3474 tcg_gen_movi_i64(tmp, ccm);
3475 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
3476 tcg_temp_free_i64(tmp);
3477 return NO_EXIT;
3478 }
3479
3480 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
3481 {
3482 int shift = s->insn->data & 0xff;
3483 int size = s->insn->data >> 8;
3484 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
3485 return NO_EXIT;
3486 }
3487
3488 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
3489 {
3490 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
3491 return NO_EXIT;
3492 }
3493
3494 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
3495 {
3496 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
3497 return NO_EXIT;
3498 }
3499
3500 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
3501 {
3502 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
3503 return NO_EXIT;
3504 }
3505
3506 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
3507 {
3508 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
3509 return NO_EXIT;
3510 }
3511
3512 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
3513 {
3514 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
3515 return NO_EXIT;
3516 }
3517
3518 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
3519 {
3520 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
3521 return NO_EXIT;
3522 }
3523
3524 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
3525 {
3526 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
3527 return NO_EXIT;
3528 }
3529
3530 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
3531 {
3532 o->out = o->in2;
3533 o->g_out = o->g_in2;
3534 TCGV_UNUSED_I64(o->in2);
3535 o->g_in2 = false;
3536 return NO_EXIT;
3537 }
3538
3539 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
3540 {
3541 o->out = o->in1;
3542 o->out2 = o->in2;
3543 o->g_out = o->g_in1;
3544 o->g_out2 = o->g_in2;
3545 TCGV_UNUSED_I64(o->in1);
3546 TCGV_UNUSED_I64(o->in2);
3547 o->g_in1 = o->g_in2 = false;
3548 return NO_EXIT;
3549 }
3550
3551 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
3552 {
3553 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3554 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
3555 potential_page_fault(s);
3556 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
3557 tcg_temp_free_i32(r1);
3558 tcg_temp_free_i32(r2);
3559 set_cc_static(s);
3560 return NO_EXIT;
3561 }
3562
3563 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3564 {
3565 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3566 return NO_EXIT;
3567 }
3568
3569 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3570 {
3571 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
3572 return_low128(o->out2);
3573 return NO_EXIT;
3574 }
3575
3576 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3577 {
3578 gen_helper_nabs_i64(o->out, o->in2);
3579 return NO_EXIT;
3580 }
3581
3582 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3583 {
3584 tcg_gen_neg_i64(o->out, o->in2);
3585 return NO_EXIT;
3586 }
3587
3588 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3589 {
3590 tcg_gen_or_i64(o->out, o->in1, o->in2);
3591 return NO_EXIT;
3592 }
3593
3594 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3595 {
3596 int shift = s->insn->data & 0xff;
3597 int size = s->insn->data >> 8;
3598 uint64_t mask = ((1ull << size) - 1) << shift;
3599
3600 assert(!o->g_in2);
3601 tcg_gen_shli_i64(o->in2, o->in2, shift);
3602 tcg_gen_or_i64(o->out, o->in1, o->in2);
3603
3604 /* Produce the CC from only the bits manipulated. */
3605 tcg_gen_andi_i64(cc_dst, o->out, mask);
3606 set_cc_nz_u64(s, cc_dst);
3607 return NO_EXIT;
3608 }
3609
3610 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3611 {
3612 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3613 return NO_EXIT;
3614 }
3615
3616 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3617 {
3618 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3619 return NO_EXIT;
3620 }
3621
3622 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3623 {
3624 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3625 return NO_EXIT;
3626 }
3627
3628 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3629 {
3630 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3631 return NO_EXIT;
3632 }
3633
3634 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3635 {
3636 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3637 return NO_EXIT;
3638 }
3639
3640 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3641 {
3642 TCGv_i64 cc;
3643
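/* Subtract with borrow is computed as in1 + ~in2 + carry, where the
   incoming carry is bit 1 of the current condition code. */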
3644 assert(!o->g_in2);
3645 tcg_gen_not_i64(o->in2, o->in2);
3646 tcg_gen_add_i64(o->out, o->in1, o->in2);
3647
3648 /* XXX possible optimization point */
3649 gen_op_calc_cc(s);
3650 cc = tcg_temp_new_i64();
3651 tcg_gen_extu_i32_i64(cc, cc_op);
3652 tcg_gen_shri_i64(cc, cc, 1);
3653 tcg_gen_add_i64(o->out, o->out, cc);
3654 tcg_temp_free_i64(cc);
3655 return NO_EXIT;
3656 }
3657
3658 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3659 {
3660 TCGv_i32 t;
3661
3662 update_psw_addr(s);
3663 gen_op_calc_cc(s);
3664
3665 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3666 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3667 tcg_temp_free_i32(t);
3668
3669 t = tcg_const_i32(s->next_pc - s->pc);
3670 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3671 tcg_temp_free_i32(t);
3672
3673 gen_exception(EXCP_SVC);
3674 return EXIT_NORETURN;
3675 }
3676
3677 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3678 {
3679 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3680 return NO_EXIT;
3681 }
3682
3683 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3684 {
3685 int shift = s->insn->data & 0xff;
3686 int size = s->insn->data >> 8;
3687 uint64_t mask = ((1ull << size) - 1) << shift;
3688
3689 assert(!o->g_in2);
3690 tcg_gen_shli_i64(o->in2, o->in2, shift);
3691 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3692
3693 /* Produce the CC from only the bits manipulated. */
3694 tcg_gen_andi_i64(cc_dst, o->out, mask);
3695 set_cc_nz_u64(s, cc_dst);
3696 return NO_EXIT;
3697 }
3698
3699 /* ====================================================================== */
3700 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3701 the original inputs), update the various cc data structures in order to
3702 be able to compute the new condition code. */
3703
3704 static void cout_abs32(DisasContext *s, DisasOps *o)
3705 {
3706 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3707 }
3708
3709 static void cout_abs64(DisasContext *s, DisasOps *o)
3710 {
3711 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3712 }
3713
3714 static void cout_adds32(DisasContext *s, DisasOps *o)
3715 {
3716 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3717 }
3718
3719 static void cout_adds64(DisasContext *s, DisasOps *o)
3720 {
3721 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3722 }
3723
3724 static void cout_addu32(DisasContext *s, DisasOps *o)
3725 {
3726 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3727 }
3728
3729 static void cout_addu64(DisasContext *s, DisasOps *o)
3730 {
3731 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3732 }
3733
3734 static void cout_addc32(DisasContext *s, DisasOps *o)
3735 {
3736 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3737 }
3738
3739 static void cout_addc64(DisasContext *s, DisasOps *o)
3740 {
3741 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3742 }
3743
3744 static void cout_cmps32(DisasContext *s, DisasOps *o)
3745 {
3746 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3747 }
3748
3749 static void cout_cmps64(DisasContext *s, DisasOps *o)
3750 {
3751 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3752 }
3753
3754 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3755 {
3756 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3757 }
3758
3759 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3760 {
3761 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3762 }
3763
3764 static void cout_nabs32(DisasContext *s, DisasOps *o)
3765 {
3766 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3767 }
3768
3769 static void cout_nabs64(DisasContext *s, DisasOps *o)
3770 {
3771 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3772 }
3773
3774 static void cout_neg32(DisasContext *s, DisasOps *o)
3775 {
3776 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3777 }
3778
3779 static void cout_neg64(DisasContext *s, DisasOps *o)
3780 {
3781 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
3782 }
3783
3784 static void cout_nz32(DisasContext *s, DisasOps *o)
3785 {
3786 tcg_gen_ext32u_i64(cc_dst, o->out);
3787 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3788 }
3789
3790 static void cout_nz64(DisasContext *s, DisasOps *o)
3791 {
3792 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3793 }
3794
3795 static void cout_s32(DisasContext *s, DisasOps *o)
3796 {
3797 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3798 }
3799
3800 static void cout_s64(DisasContext *s, DisasOps *o)
3801 {
3802 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3803 }
3804
3805 static void cout_subs32(DisasContext *s, DisasOps *o)
3806 {
3807 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3808 }
3809
3810 static void cout_subs64(DisasContext *s, DisasOps *o)
3811 {
3812 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3813 }
3814
3815 static void cout_subu32(DisasContext *s, DisasOps *o)
3816 {
3817 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3818 }
3819
3820 static void cout_subu64(DisasContext *s, DisasOps *o)
3821 {
3822 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3823 }
3824
3825 static void cout_subb32(DisasContext *s, DisasOps *o)
3826 {
3827 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3828 }
3829
3830 static void cout_subb64(DisasContext *s, DisasOps *o)
3831 {
3832 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3833 }
3834
3835 static void cout_tm32(DisasContext *s, DisasOps *o)
3836 {
3837 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3838 }
3839
3840 static void cout_tm64(DisasContext *s, DisasOps *o)
3841 {
3842 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3843 }
3844
3845 /* ====================================================================== */
3846 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3847 with the TCG register to which we will write. Used in combination with
3848 the "wout" generators, in some cases we need a new temporary, and in
3849 some cases we can write to a TCG global. */
3850
3851 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3852 {
3853 o->out = tcg_temp_new_i64();
3854 }
3855
3856 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3857 {
3858 o->out = tcg_temp_new_i64();
3859 o->out2 = tcg_temp_new_i64();
3860 }
3861
3862 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3863 {
3864 o->out = regs[get_field(f, r1)];
3865 o->g_out = true;
3866 }
3867
3868 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3869 {
3870 /* ??? Specification exception: r1 must be even. */
3871 int r1 = get_field(f, r1);
3872 o->out = regs[r1];
3873 o->out2 = regs[(r1 + 1) & 15];
3874 o->g_out = o->g_out2 = true;
3875 }
3876
3877 /* ====================================================================== */
3878 /* The "Write OUTput" generators. These generally perform some non-trivial
3879 copy of data to TCG globals, or to main memory. The trivial cases are
3880 generally handled by having a "prep" generator install the TCG global
3881 as the destination of the operation. */
3882
3883 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3884 {
3885 store_reg(get_field(f, r1), o->out);
3886 }
3887
3888 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3889 {
3890 int r1 = get_field(f, r1);
3891 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3892 }
3893
3894 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3895 {
3896 store_reg32_i64(get_field(f, r1), o->out);
3897 }
3898
3899 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3900 {
3901 /* ??? Specification exception: r1 must be even. */
3902 int r1 = get_field(f, r1);
3903 store_reg32_i64(r1, o->out);
3904 store_reg32_i64((r1 + 1) & 15, o->out2);
3905 }
3906
3907 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3908 {
3909 /* ??? Specification exception: r1 must be even. */
3910 int r1 = get_field(f, r1);
3911 store_reg32_i64((r1 + 1) & 15, o->out);
3912 tcg_gen_shri_i64(o->out, o->out, 32);
3913 store_reg32_i64(r1, o->out);
3914 }
3915
3916 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3917 {
3918 store_freg32_i64(get_field(f, r1), o->out);
3919 }
3920
3921 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3922 {
3923 store_freg(get_field(f, r1), o->out);
3924 }
3925
3926 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3927 {
3928 int f1 = get_field(s->fields, r1);
3929 store_freg(f1, o->out);
3930 store_freg((f1 + 2) & 15, o->out2);
3931 }
3932
3933 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3934 {
3935 if (get_field(f, r1) != get_field(f, r2)) {
3936 store_reg32_i64(get_field(f, r1), o->out);
3937 }
3938 }
3939
3940 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3941 {
3942 if (get_field(f, r1) != get_field(f, r2)) {
3943 store_freg32_i64(get_field(f, r1), o->out);
3944 }
3945 }
3946
3947 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3948 {
3949 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3950 }
3951
3952 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3953 {
3954 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3955 }
3956
3957 /* ====================================================================== */
3958 /* The "INput 1" generators. These load the first operand to an insn. */
3959
3960 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3961 {
3962 o->in1 = load_reg(get_field(f, r1));
3963 }
3964
3965 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3966 {
3967 o->in1 = regs[get_field(f, r1)];
3968 o->g_in1 = true;
3969 }
3970
3971 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3972 {
3973 /* ??? Specification exception: r1 must be even. */
3974 int r1 = get_field(f, r1);
3975 o->in1 = load_reg((r1 + 1) & 15);
3976 }
3977
3978 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3979 {
3980 /* ??? Specification exception: r1 must be even. */
3981 int r1 = get_field(f, r1);
3982 o->in1 = tcg_temp_new_i64();
3983 tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
3984 }
3985
3986 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3987 {
3988 /* ??? Specification exception: r1 must be even. */
3989 int r1 = get_field(f, r1);
3990 o->in1 = tcg_temp_new_i64();
3991 tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
3992 }
3993
3994 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3995 {
3996 /* ??? Specification exception: r1 must be even. */
3997 int r1 = get_field(f, r1);
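/* Assemble the 64-bit operand from the even/odd pair: r1 supplies
   the high 32 bits, r1 + 1 the low 32 bits. */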
3998 o->in1 = tcg_temp_new_i64();
3999 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4000 }
4001
4002 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4003 {
4004 o->in1 = load_reg(get_field(f, r2));
4005 }
4006
4007 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4008 {
4009 o->in1 = load_reg(get_field(f, r3));
4010 }
4011
4012 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4013 {
4014 o->in1 = load_freg32_i64(get_field(f, r1));
4015 }
4016
4017 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4018 {
4019 o->in1 = fregs[get_field(f, r1)];
4020 o->g_in1 = true;
4021 }
4022
4023 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4024 {
4025 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4026 }
4027
4028 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4029 {
4030 in1_la1(s, f, o);
4031 o->in1 = tcg_temp_new_i64();
4032 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4033 }
4034
4035 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4036 {
4037 in1_la1(s, f, o);
4038 o->in1 = tcg_temp_new_i64();
4039 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4040 }
4041
4042 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4043 {
4044 in1_la1(s, f, o);
4045 o->in1 = tcg_temp_new_i64();
4046 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4047 }
4048
4049 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4050 {
4051 in1_la1(s, f, o);
4052 o->in1 = tcg_temp_new_i64();
4053 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4054 }
4055
4056 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4057 {
4058 in1_la1(s, f, o);
4059 o->in1 = tcg_temp_new_i64();
4060 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4061 }
4062
4063 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4064 {
4065 in1_la1(s, f, o);
4066 o->in1 = tcg_temp_new_i64();
4067 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4068 }
4069
4070 /* ====================================================================== */
4071 /* The "INput 2" generators. These load the second operand to an insn. */
4072
4073 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4074 {
4075 o->in2 = load_reg(get_field(f, r2));
4076 }
4077
4078 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4079 {
4080 o->in2 = regs[get_field(f, r2)];
4081 o->g_in2 = true;
4082 }
4083
4084 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4085 {
4086 int r2 = get_field(f, r2);
4087 if (r2 != 0) {
4088 o->in2 = load_reg(r2);
4089 }
4090 }
4091
4092 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4093 {
4094 o->in2 = tcg_temp_new_i64();
4095 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4096 }
4097
4098 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4099 {
4100 o->in2 = tcg_temp_new_i64();
4101 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4102 }
4103
4104 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4105 {
4106 o->in2 = tcg_temp_new_i64();
4107 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4108 }
4109
4110 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4111 {
4112 o->in2 = tcg_temp_new_i64();
4113 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4114 }
4115
4116 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4117 {
4118 o->in2 = load_reg(get_field(f, r3));
4119 }
4120
4121 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4122 {
4123 o->in2 = tcg_temp_new_i64();
4124 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4125 }
4126
4127 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4128 {
4129 o->in2 = tcg_temp_new_i64();
4130 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4131 }
4132
4133 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4134 {
4135 o->in2 = load_freg32_i64(get_field(f, r2));
4136 }
4137
4138 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4139 {
4140 o->in2 = fregs[get_field(f, r2)];
4141 o->g_in2 = true;
4142 }
4143
4144 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4145 {
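/* Extended-precision source operand: note that this "in2" helper
   fills both in1 and in2 from the register pair f2 / f2 + 2. */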
4146 int f2 = get_field(f, r2);
4147 o->in1 = fregs[f2];
4148 o->in2 = fregs[(f2 + 2) & 15];
4149 o->g_in1 = o->g_in2 = true;
4150 }
4151
4152 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4153 {
4154 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4155 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4156 }
4157
4158 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4159 {
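/* PC-relative operand: i2 is a signed halfword offset from the
   insn address, hence the scaling by 2. */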
4160 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4161 }
4162
4163 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4164 {
4165 in2_a2(s, f, o);
4166 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4167 }
4168
4169 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4170 {
4171 in2_a2(s, f, o);
4172 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4173 }
4174
4175 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4176 {
4177 in2_a2(s, f, o);
4178 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4179 }
4180
4181 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4182 {
4183 in2_a2(s, f, o);
4184 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4185 }
4186
4187 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4188 {
4189 in2_a2(s, f, o);
4190 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4191 }
4192
4193 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4194 {
4195 in2_ri2(s, f, o);
4196 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4197 }
4198
4199 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4200 {
4201 in2_ri2(s, f, o);
4202 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4203 }
4204
4205 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4206 {
4207 in2_ri2(s, f, o);
4208 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4209 }
4210
4211 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4212 {
4213 in2_ri2(s, f, o);
4214 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4215 }
4216
4217 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4218 {
4219 o->in2 = tcg_const_i64(get_field(f, i2));
4220 }
4221
4222 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4223 {
4224 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4225 }
4226
4227 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4228 {
4229 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4230 }
4231
4232 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4233 {
4234 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4235 }
4236
4237 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4238 {
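/* The shift amount comes from the .data field of the insn table
   entry, letting one helper serve every shifted-immediate variant. */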
4239 uint64_t i2 = (uint16_t)get_field(f, i2);
4240 o->in2 = tcg_const_i64(i2 << s->insn->data);
4241 }
4242
4243 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4244 {
4245 uint64_t i2 = (uint32_t)get_field(f, i2);
4246 o->in2 = tcg_const_i64(i2 << s->insn->data);
4247 }
4248
4249 /* ====================================================================== */
4250
4251 /* Find opc within the table of insns. This is formulated as a switch
4252 statement so that (1) we get compile-time notice of cut-paste errors
4253 for duplicated opcodes, and (2) the compiler generates the binary
4254 search tree, rather than us having to post-process the table. */
4255
4256 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4257 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4258
4259 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4260
4261 enum DisasInsnEnum {
4262 #include "insn-data.def"
4263 };
4264
4265 #undef D
4266 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4267 .opc = OPC, \
4268 .fmt = FMT_##FT, \
4269 .fac = FAC_##FC, \
4270 .name = #NM, \
4271 .help_in1 = in1_##I1, \
4272 .help_in2 = in2_##I2, \
4273 .help_prep = prep_##P, \
4274 .help_wout = wout_##W, \
4275 .help_cout = cout_##CC, \
4276 .help_op = op_##OP, \
4277 .data = D \
4278 },
4279
4280 /* Allow 0 to be used for NULL in the table below. */
4281 #define in1_0 NULL
4282 #define in2_0 NULL
4283 #define prep_0 NULL
4284 #define wout_0 NULL
4285 #define cout_0 NULL
4286 #define op_0 NULL
4287
4288 static const DisasInsn insn_info[] = {
4289 #include "insn-data.def"
4290 };
4291
4292 #undef D
4293 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4294 case OPC: return &insn_info[insn_ ## NM];
4295
4296 static const DisasInsn *lookup_opc(uint16_t opc)
4297 {
4298 switch (opc) {
4299 #include "insn-data.def"
4300 default:
4301 return NULL;
4302 }
4303 }
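
/* As an illustration of the three passes over insn-data.def above: a
   table line of the shape
       C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
   (representative only; see insn-data.def for the real entries)
   expands first to the enum constant insn_AR, then to an insn_info[]
   initializer wiring up in1_r1, in2_r2, prep_new, wout_r1_32, op_add
   and cout_adds32, and finally to "case 0x1a00:" inside lookup_opc. */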
4304
4305 #undef D
4306 #undef C
4307
4308 /* Extract a field from the insn. The INSN should be left-aligned in
4309 the uint64_t so that we can more easily utilize the big-bit-endian
4310 definitions we extract from the Principles of Operation. */
4311
4312 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4313 {
4314 uint32_t r, m;
4315
4316 if (f->size == 0) {
4317 return;
4318 }
4319
4320 /* Zero extract the field from the insn. */
4321 r = (insn << f->beg) >> (64 - f->size);
4322
4323 /* Sign-extend, or un-swap the field as necessary. */
4324 switch (f->type) {
4325 case 0: /* unsigned */
4326 break;
4327 case 1: /* signed */
4328 assert(f->size <= 32);
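/* Sign-extend with the xor/subtract idiom: for an n-bit field,
   flipping the sign bit and then subtracting it extends the value
   to 32 bits, e.g. an 8-bit r = 0xff becomes (0x7f - 0x80) = -1. */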
4329 m = 1u << (f->size - 1);
4330 r = (r ^ m) - m;
4331 break;
4332 case 2: /* dl+dh split, signed 20 bit. */
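/* The raw extract above leaves DL (12 bits) in bits 8..19 of r and
   the DH byte in bits 0..7; reassemble as the signed value DH:DL. */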
4333 r = ((int8_t)r << 12) | (r >> 8);
4334 break;
4335 default:
4336 abort();
4337 }
4338
4339 /* Validate the "compressed" encoding we selected above; i.e. check
4340 that we haven't made two different original fields overlap. */
4341 assert(((o->presentC >> f->indexC) & 1) == 0);
4342 o->presentC |= 1 << f->indexC;
4343 o->presentO |= 1 << f->indexO;
4344
4345 o->c[f->indexC] = r;
4346 }
4347
4348 /* Look up the insn at the current PC, extracting the operands into F and
4349 returning the info struct for the insn. Returns NULL for an invalid insn. */
4350
4351 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4352 DisasFields *f)
4353 {
4354 uint64_t insn, pc = s->pc;
4355 int op, op2, ilen;
4356 const DisasInsn *info;
4357
4358 insn = ld_code2(env, pc);
4359 op = (insn >> 8) & 0xff;
4360 ilen = get_ilen(op);
4361 s->next_pc = s->pc + ilen;
4362
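/* Left-justify the insn bytes within the 64-bit word, so that the
   field definitions used by extract_field can refer to fixed
   big-endian bit positions regardless of insn length. */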
4363 switch (ilen) {
4364 case 2:
4365 insn = insn << 48;
4366 break;
4367 case 4:
4368 insn = ld_code4(env, pc) << 32;
4369 break;
4370 case 6:
4371 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4372 break;
4373 default:
4374 abort();
4375 }
4376
4377 /* We can't actually determine the insn format until we've looked up
4378 the full insn opcode, which in turn requires locating the
4379 secondary opcode. Assume by default that OP2 is at bit 40; for
4380 those smaller insns that don't actually have a secondary opcode
4381 this will correctly result in OP2 = 0. */
4382 switch (op) {
4383 case 0x01: /* E */
4384 case 0x80: /* S */
4385 case 0x82: /* S */
4386 case 0x93: /* S */
4387 case 0xb2: /* S, RRF, RRE */
4388 case 0xb3: /* RRE, RRD, RRF */
4389 case 0xb9: /* RRE, RRF */
4390 case 0xe5: /* SSE, SIL */
4391 op2 = (insn << 8) >> 56;
4392 break;
4393 case 0xa5: /* RI */
4394 case 0xa7: /* RI */
4395 case 0xc0: /* RIL */
4396 case 0xc2: /* RIL */
4397 case 0xc4: /* RIL */
4398 case 0xc6: /* RIL */
4399 case 0xc8: /* SSF */
4400 case 0xcc: /* RIL */
4401 op2 = (insn << 12) >> 60;
4402 break;
4403 case 0xd0 ... 0xdf: /* SS */
4404 case 0xe1: /* SS */
4405 case 0xe2: /* SS */
4406 case 0xe8: /* SS */
4407 case 0xe9: /* SS */
4408 case 0xea: /* SS */
4409 case 0xee ... 0xf3: /* SS */
4410 case 0xf8 ... 0xfd: /* SS */
4411 op2 = 0;
4412 break;
4413 default:
4414 op2 = (insn << 40) >> 56;
4415 break;
4416 }
4417
4418 memset(f, 0, sizeof(*f));
4419 f->op = op;
4420 f->op2 = op2;
4421
4422 /* Look up the instruction. */
4423 info = lookup_opc(op << 8 | op2);
4424
4425 /* If we found it, extract the operands. */
4426 if (info != NULL) {
4427 DisasFormat fmt = info->fmt;
4428 int i;
4429
4430 for (i = 0; i < NUM_C_FIELD; ++i) {
4431 extract_field(f, &format_info[fmt].op[i], insn);
4432 }
4433 }
4434 return info;
4435 }
4436
4437 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4438 {
4439 const DisasInsn *insn;
4440 ExitStatus ret = NO_EXIT;
4441 DisasFields f;
4442 DisasOps o;
4443
4444 insn = extract_insn(env, s, &f);
4445
4446 /* If not found, try the old interpreter. This includes ILLOPC. */
4447 if (insn == NULL) {
4448 disas_s390_insn(env, s);
4449 switch (s->is_jmp) {
4450 case DISAS_NEXT:
4451 ret = NO_EXIT;
4452 break;
4453 case DISAS_TB_JUMP:
4454 ret = EXIT_GOTO_TB;
4455 break;
4456 case DISAS_JUMP:
4457 ret = EXIT_PC_UPDATED;
4458 break;
4459 case DISAS_EXCP:
4460 ret = EXIT_NORETURN;
4461 break;
4462 default:
4463 abort();
4464 }
4465
4466 s->pc = s->next_pc;
4467 return ret;
4468 }
4469
4470 /* Set up the structures we use to communicate with the helpers. */
4471 s->insn = insn;
4472 s->fields = &f;
4473 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4474 TCGV_UNUSED_I64(o.out);
4475 TCGV_UNUSED_I64(o.out2);
4476 TCGV_UNUSED_I64(o.in1);
4477 TCGV_UNUSED_I64(o.in2);
4478 TCGV_UNUSED_I64(o.addr1);
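/* The g_* flags mark operands that alias TCG globals (the guest
   registers themselves); those must not be freed below. */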
4479
4480 /* Implement the instruction. */
4481 if (insn->help_in1) {
4482 insn->help_in1(s, &f, &o);
4483 }
4484 if (insn->help_in2) {
4485 insn->help_in2(s, &f, &o);
4486 }
4487 if (insn->help_prep) {
4488 insn->help_prep(s, &f, &o);
4489 }
4490 if (insn->help_op) {
4491 ret = insn->help_op(s, &o);
4492 }
4493 if (insn->help_wout) {
4494 insn->help_wout(s, &f, &o);
4495 }
4496 if (insn->help_cout) {
4497 insn->help_cout(s, &o);
4498 }
4499
4500 /* Free any temporaries created by the helpers. */
4501 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4502 tcg_temp_free_i64(o.out);
4503 }
4504 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4505 tcg_temp_free_i64(o.out2);
4506 }
4507 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4508 tcg_temp_free_i64(o.in1);
4509 }
4510 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4511 tcg_temp_free_i64(o.in2);
4512 }
4513 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4514 tcg_temp_free_i64(o.addr1);
4515 }
4516
4517 /* Advance to the next instruction. */
4518 s->pc = s->next_pc;
4519 return ret;
4520 }
4521
4522 static inline void gen_intermediate_code_internal(CPUS390XState *env,
4523 TranslationBlock *tb,
4524 int search_pc)
4525 {
4526 DisasContext dc;
4527 target_ulong pc_start;
4528 uint64_t next_page_start;
4529 uint16_t *gen_opc_end;
4530 int j, lj = -1;
4531 int num_insns, max_insns;
4532 CPUBreakpoint *bp;
4533 ExitStatus status;
4534 bool do_debug;
4535
4536 pc_start = tb->pc;
4537
4538 /* 31-bit mode */
4539 if (!(tb->flags & FLAG_MASK_64)) {
4540 pc_start &= 0x7fffffff;
4541 }
4542
4543 dc.tb = tb;
4544 dc.pc = pc_start;
4545 dc.cc_op = CC_OP_DYNAMIC;
4546 do_debug = dc.singlestep_enabled = env->singlestep_enabled;
4547 dc.is_jmp = DISAS_NEXT;
4548
4549 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4550
4551 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4552
4553 num_insns = 0;
4554 max_insns = tb->cflags & CF_COUNT_MASK;
4555 if (max_insns == 0) {
4556 max_insns = CF_COUNT_MASK;
4557 }
4558
4559 gen_icount_start();
4560
4561 do {
4562 if (search_pc) {
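/* Record the guest PC, cc_op and insn count at each insn boundary,
   so restore_state_to_opc can map a host PC back to guest state. */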
4563 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4564 if (lj < j) {
4565 lj++;
4566 while (lj < j) {
4567 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4568 }
4569 }
4570 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4571 gen_opc_cc_op[lj] = dc.cc_op;
4572 tcg_ctx.gen_opc_instr_start[lj] = 1;
4573 tcg_ctx.gen_opc_icount[lj] = num_insns;
4574 }
4575 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4576 gen_io_start();
4577 }
4578
4579 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4580 tcg_gen_debug_insn_start(dc.pc);
4581 }
4582
4583 status = NO_EXIT;
4584 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
4585 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4586 if (bp->pc == dc.pc) {
4587 status = EXIT_PC_STALE;
4588 do_debug = true;
4589 break;
4590 }
4591 }
4592 }
4593 if (status == NO_EXIT) {
4594 status = translate_one(env, &dc);
4595 }
4596
4597 /* If we reach a page boundary, are single-stepping,
4598 or have exhausted the instruction count, stop generation. */
4599 if (status == NO_EXIT
4600 && (dc.pc >= next_page_start
4601 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4602 || num_insns >= max_insns
4603 || singlestep
4604 || env->singlestep_enabled)) {
4605 status = EXIT_PC_STALE;
4606 }
4607 } while (status == NO_EXIT);
4608
4609 if (tb->cflags & CF_LAST_IO) {
4610 gen_io_end();
4611 }
4612
4613 switch (status) {
4614 case EXIT_GOTO_TB:
4615 case EXIT_NORETURN:
4616 break;
4617 case EXIT_PC_STALE:
4618 update_psw_addr(&dc);
4619 /* FALLTHRU */
4620 case EXIT_PC_UPDATED:
4621 if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
4622 gen_op_calc_cc(&dc);
4623 } else {
4624 /* Next TB starts off with CC_OP_DYNAMIC,
4625 so make sure the cc op type is in env */
4626 gen_op_set_cc_op(&dc);
4627 }
4628 if (do_debug) {
4629 gen_exception(EXCP_DEBUG);
4630 } else {
4631 /* Generate the return instruction */
4632 tcg_gen_exit_tb(0);
4633 }
4634 break;
4635 default:
4636 abort();
4637 }
4638
4639 gen_icount_end(tb, num_insns);
4640 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4641 if (search_pc) {
4642 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4643 lj++;
4644 while (lj <= j) {
4645 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4646 }
4647 } else {
4648 tb->size = dc.pc - pc_start;
4649 tb->icount = num_insns;
4650 }
4651
4652 #if defined(S390X_DEBUG_DISAS)
4653 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4654 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4655 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4656 qemu_log("\n");
4657 }
4658 #endif
4659 }
4660
4661 void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
4662 {
4663 gen_intermediate_code_internal(env, tb, 0);
4664 }
4665
4666 void gen_intermediate_code_pc(CPUS390XState *env, struct TranslationBlock *tb)
4667 {
4668 gen_intermediate_code_internal(env, tb, 1);
4669 }
4670
4671 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4672 {
4673 int cc_op;
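/* Re-point the PSW at the recorded insn; restore cc_op only when the
   recorded value names a concrete computation, since writing DYNAMIC
   or STATIC back would clobber the live value in env->cc_op. */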
4674 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4675 cc_op = gen_opc_cc_op[pc_pos];
4676 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4677 env->cc_op = cc_op;
4678 }
4679 }