/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* ??? The translation blocks produced by TCG are generally small enough to
   be entirely reachable with a 16-bit displacement.  Leaving the option for
   a 32-bit displacement here Just In Case.  */
#define USE_LONG_BRANCHES 0

#define TCG_CT_CONST_32    0x0100
#define TCG_CT_CONST_NEG   0x0200
#define TCG_CT_CONST_ADDI  0x0400
#define TCG_CT_CONST_MULI  0x0800
#define TCG_CT_CONST_ANDI  0x1000
#define TCG_CT_CONST_ORI   0x2000
#define TCG_CT_CONST_XORI  0x4000
#define TCG_CT_CONST_CMPI  0x8000

/* In several places within the instruction set, 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE    0

/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0        TCG_REG_R14

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R13
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

#ifndef GUEST_BASE
#define GUEST_BASE 0
#endif


/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
typedef enum S390Opcode {
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,

    RIE_CGIJ    = 0xec7c,
    RIE_CGRJ    = 0xec64,
    RIE_CIJ     = 0xec7e,
    RIE_CLGRJ   = 0xec65,
    RIE_CLIJ    = 0xec7f,
    RIE_CLGIJ   = 0xec7d,
    RIE_CLRJ    = 0xec77,
    RIE_CRJ     = 0xec76,

    RRE_AGR     = 0xb908,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_XGR     = 0xb982,

    RR_AR       = 0x1a,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_XR       = 0x17,

    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SRAG    = 0xeb0a,
    RSY_SRLG    = 0xeb0c,

    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CY      = 0xe359,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,

    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,
} S390Opcode;

#define LD_SIGNED      0x04
#define LD_UINT8       0x00
#define LD_INT8        (LD_UINT8 | LD_SIGNED)
#define LD_UINT16      0x01
#define LD_INT16       (LD_UINT16 | LD_SIGNED)
#define LD_UINT32      0x02
#define LD_INT32       (LD_UINT32 | LD_SIGNED)
#define LD_UINT64      0x03
#define LD_INT64       (LD_UINT64 | LD_SIGNED)
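
/* E.g. LD_INT16 is (0x01 | LD_SIGNED): the low two bits give the log2
   of the access size in bytes (and index the MMU helper tables below),
   while the LD_SIGNED bit asks the load paths to sign-extend the
   result.  */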

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
};
#endif

/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_R3
#endif
};

#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OV      1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15
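
/* These are branch masks, one bit per hardware condition-code value:
   mask 8 selects CC 0 (equal/zero), 4 selects CC 1 (low), 2 selects
   CC 2 (high), and 1 selects CC 3 (overflow).  Compound conditions
   are formed by ORing masks, as in S390_CC_NE above.  */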

/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
static const uint8_t tcg_cond_to_s390_cond[10] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};

/* Condition codes that result from a LOAD AND TEST.  Here we have no
   unsigned instruction variation; however, since the test is against
   zero we can re-map the outcomes appropriately: unsigned x < 0 is
   never true, x <= 0 holds exactly when x == 0, and so on.  */
static const uint8_t tcg_cond_to_ltr_cond[10] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};

#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

#ifdef CONFIG_TCG_PASS_AREG0
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
#else
/* legacy helper signature: __ld_mmu(target_ulong addr, int
   mmu_idx) */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
   int mmu_idx) */
static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif
#endif

static uint8_t *tb_ret_addr;

/* A list of relevant facilities used by this translator.  Some of these
   are required for proper operation, and are checked at startup.  */

#define FACILITY_ZARCH_ACTIVE   (1ULL << (63 - 2))
#define FACILITY_LONG_DISP      (1ULL << (63 - 18))
#define FACILITY_EXT_IMM        (1ULL << (63 - 21))
#define FACILITY_GEN_INST_EXT   (1ULL << (63 - 34))
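
/* The numbering follows STORE FACILITY LIST: facility N is bit N of
   the stored doubleword counting from the most-significant bit, hence
   the (63 - N) shifts when testing the doubleword read at startup.  */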

static uint64_t facilities;

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    tcg_target_long code_ptr_tl = (tcg_target_long)code_ptr;
    tcg_target_long pcrel2;

    /* ??? Not the usual definition of "addend".  */
    pcrel2 = (value - (code_ptr_tl + addend)) >> 1;

    switch (type) {
    case R_390_PC16DBL:
        assert(pcrel2 == (int16_t)pcrel2);
        *(int16_t *)code_ptr = pcrel2;
        break;
    case R_390_PC32DBL:
        assert(pcrel2 == (int32_t)pcrel2);
        *(int32_t *)code_ptr = pcrel2;
        break;
    default:
        tcg_abort();
        break;
    }
}

static int tcg_target_get_call_iarg_regs_count(int flags)
{
    return sizeof(tcg_target_call_iarg_regs) / sizeof(int);
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':                  /* all registers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'R':                  /* not R0 */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;
    case 'L':                  /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'a':                  /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b':                  /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'N':                  /* force immediate negate */
        ct->ct |= TCG_CT_CONST_NEG;
        break;
    case 'W':                  /* force 32-bit ("word") immediate */
        ct->ct |= TCG_CT_CONST_32;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_ADDI;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MULI;
        break;
    case 'A':
        ct->ct |= TCG_CT_CONST_ANDI;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_ORI;
        break;
    case 'X':
        ct->ct |= TCG_CT_CONST_XORI;
        break;
    case 'C':
        ct->ct |= TCG_CT_CONST_CMPI;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
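
/* E.g. the "rWNI" constraint used by sub_i32 in s390_op_defs accepts
   a register, or an immediate that after negation (N) and 32-bit
   truncation (W) is valid for add-immediate (I): subtraction of a
   constant is emitted as addition of its negative.  */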

/* Immediates to be used with logical AND.  This is an optimization only,
   since a full 64-bit immediate AND can always be performed with 4
   sequential NI[LH][LH] instructions.  What we're looking for are
   immediates that we can load efficiently, such that the immediate load
   plus the reg-reg AND is smaller than the sequential NI's.  */

static int tcg_match_andi(int ct, tcg_target_ulong val)
{
    int i;

    if (facilities & FACILITY_EXT_IMM) {
        if (ct & TCG_CT_CONST_32) {
            /* All 32-bit ANDs can be performed with 1 48-bit insn.  */
            return 1;
        }

        /* Zero-extensions.  */
        if (val == 0xff || val == 0xffff || val == 0xffffffff) {
            return 1;
        }
    } else {
        if (ct & TCG_CT_CONST_32) {
            val = (uint32_t)val;
        } else if (val == 0xffffffff) {
            return 1;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if ((val & mask) == mask) {
            return 1;
        }
    }

    /* Look for 16-bit values performing the mask.  These are better
       to load with LLI[LH][LH].  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = 0xffffull << i*16;
        if ((val & mask) == val) {
            return 0;
        }
    }

    /* Look for 32-bit values performing the 64-bit mask.  These
       are better to load with LLI[LH]F, or if extended immediates
       not available, with a pair of LLI insns.  */
    if ((ct & TCG_CT_CONST_32) == 0) {
        if (val <= 0xffffffff || (val & 0xffffffff) == 0) {
            return 0;
        }
    }

    return 1;
}
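
/* E.g. 0x00ff0000 hits the 16-bit-value test and returns 0: LLILH plus
   a reg-reg AND beats a sequence of NI insns.  0xfffffffffffff00f
   returns 1, since a single NILL performs the AND directly.  */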

/* Immediates to be used with logical OR.  This is an optimization only,
   since a full 64-bit immediate OR can always be performed with 4
   sequential OI[LH][LH] instructions.  What we're looking for are
   immediates that we can load efficiently, such that the immediate load
   plus the reg-reg OR is smaller than the sequential OI's.  */

static int tcg_match_ori(int ct, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        if (ct & TCG_CT_CONST_32) {
            /* All 32-bit ORs can be performed with 1 48-bit insn.  */
            return 1;
        }
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0) {
        if (val == (int16_t)val) {
            return 0;
        }
        if (facilities & FACILITY_EXT_IMM) {
            if (val == (int32_t)val) {
                return 0;
            }
        }
    }

    return 1;
}

/* Immediates to be used with logical XOR.  This is almost, but not quite,
   only an optimization.  XOR with immediate is only supported with the
   extended-immediate facility.  That said, there are a few patterns for
   which it is better to load the value into a register first.  */

static int tcg_match_xori(int ct, tcg_target_long val)
{
    if ((facilities & FACILITY_EXT_IMM) == 0) {
        return 0;
    }

    if (ct & TCG_CT_CONST_32) {
        /* All 32-bit XORs can be performed with 1 48-bit insn.  */
        return 1;
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0 && val == (int32_t)val) {
        return 0;
    }

    return 1;
}

/* Immediates to be used with comparisons.  */

static int tcg_match_cmpi(int ct, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* The COMPARE IMMEDIATE instruction is available.  */
        if (ct & TCG_CT_CONST_32) {
            /* We have a 32-bit immediate and can compare against anything.  */
            return 1;
        } else {
            /* ??? We have no insight here into whether the comparison is
               signed or unsigned.  The COMPARE IMMEDIATE insn uses a 32-bit
               signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
               a 32-bit unsigned immediate.  If we were to use the (semi)
               obvious "val == (int32_t)val" we would be enabling unsigned
               comparisons vs very large numbers.  The only solution is to
               take the intersection of the ranges.  */
            /* ??? Another possible solution is to simply lie and allow all
               constants here and force the out-of-range values into a temp
               register in tgen_cmp when we have knowledge of the actual
               comparison code in use.  */
            return val >= 0 && val <= 0x7fffffff;
        }
    } else {
        /* Only the LOAD AND TEST instruction is available.  */
        return val == 0;
    }
}

/* Test if a constant matches the constraint.  */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    /* Handle the modifiers.  */
    if (ct & TCG_CT_CONST_NEG) {
        val = -val;
    }
    if (ct & TCG_CT_CONST_32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_ADDI) {
        /* Immediates that may be used with add.  If we have the
           extended-immediates facility then we have ADD IMMEDIATE
           with signed and unsigned 32-bit, otherwise we have only
           ADD HALFWORD IMMEDIATE with a signed 16-bit.  */
        if (facilities & FACILITY_EXT_IMM) {
            return val == (int32_t)val || val == (uint32_t)val;
        } else {
            return val == (int16_t)val;
        }
    } else if (ct & TCG_CT_CONST_MULI) {
        /* Immediates that may be used with multiply.  If we have the
           general-instruction-extensions, then we have MULTIPLY SINGLE
           IMMEDIATE with a signed 32-bit, otherwise we have only
           MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit.  */
        if (facilities & FACILITY_GEN_INST_EXT) {
            return val == (int32_t)val;
        } else {
            return val == (int16_t)val;
        }
    } else if (ct & TCG_CT_CONST_ANDI) {
        return tcg_match_andi(ct, val);
    } else if (ct & TCG_CT_CONST_ORI) {
        return tcg_match_ori(ct, val);
    } else if (ct & TCG_CT_CONST_XORI) {
        return tcg_match_xori(ct, val);
    } else if (ct & TCG_CT_CONST_CMPI) {
        return tcg_match_cmpi(ct, val);
    }

    return 0;
}

/* Emit instructions according to the given instruction format.  */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
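
/* E.g. tcg_out_insn(s, RIL, LARL, ret, off) expands to
   tcg_out_insn_RIL(s, RIL_LARL, ret, off); naming an opcode that does
   not exist in the requested format fails to compile, hence the
   "type-checking".  */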

/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext *s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext *s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src != dst) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, LR, dst, src);
        } else {
            tcg_out_insn(s, RRE, LGR, dst, src);
        }
    }
}

/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    static const S390Opcode lli_insns[4] = {
        RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
    };

    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
            return;
        }
    }

    /* Try for PC-relative address load.  */
    if ((sval & 1) == 0) {
        intptr_t off = (sval - (intptr_t)s->code_ptr) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    }

    /* If extended immediates are not present, then we may have to issue
       several instructions to load the low 32 bits.  */
    if (!(facilities & FACILITY_EXT_IMM)) {
        /* A 32-bit unsigned value can be loaded in 2 insns.  And given
           that the lli_insns loop above did not succeed, we know that
           both insns are required.  */
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RI, LLILL, ret, uval);
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }

        /* If all high bits are set, the value can be loaded in 2 or 3 insns.
           We first want to make sure that all the high bits get set.  With
           luck the low 16 bits can be considered negative to perform that
           for free, otherwise we load an explicit -1.  */
        if (sval >> 31 >> 1 == -1) {
            if (uval & 0x8000) {
                tcg_out_insn(s, RI, LGHI, ret, uval);
            } else {
                tcg_out_insn(s, RI, LGHI, ret, -1);
                tcg_out_insn(s, RI, IILL, ret, uval);
            }
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }
    }

    /* If we get here, both the high and low parts have non-zero bits.  */

    /* Recurse to load the lower 32 bits.  */
    tcg_out_movi(s, TCG_TYPE_I32, ret, sval);

    /* Insert data into the high 32 bits.  */
    uval = uval >> 31 >> 1;
    if (facilities & FACILITY_EXT_IMM) {
        if (uval < 0x10000) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        } else if ((uval & 0xffff) == 0) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        } else {
            tcg_out_insn(s, RIL, IIHF, ret, uval);
        }
    } else {
        if (uval & 0xffff) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        }
        if (uval & 0xffff0000) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        }
    }
}
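
/* Examples of the paths above: 0x5000 fits LGHI; 0x12340000 is caught
   by the lli_insns loop as LLILH 0x1234; with extended immediates,
   -0x12345678 uses LGFI; and an arbitrary 64-bit constant typically
   becomes the 32-bit recursion plus IIHF.  */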


/* Emit a load/store type instruction.  Inputs are:
   DATA:           The register to be loaded or stored.
   BASE+INDEX+OFS: The effective address.
   OPC_RX:  If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY: The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 16 bits of the offset with the actual load insn;
           the high 48 bits must come from an immediate load.  */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs & ~0xffff);
        ofs &= 0xffff;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
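
/* E.g. for ofs 0x123456 the movi above loads 0x120000 into TCG_TMP0 as
   the index, and the remaining 0x3456 still fits the signed 20-bit
   displacement of the RXY form.  */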


/* load data without address translation or endianness conversion */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, tcg_target_long ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
    }
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, tcg_target_long ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
    }
}

/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    tcg_target_long addr = (tcg_target_long)abs;

    if (facilities & FACILITY_GEN_INST_EXT) {
        tcg_target_long disp = (addr - (tcg_target_long)s->code_ptr) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }

    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}

static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}

static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}

static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}

static inline void tgen32_addi(TCGContext *s, TCGReg dest, int32_t val)
{
    if (val == (int16_t)val) {
        tcg_out_insn(s, RI, AHI, dest, val);
    } else {
        tcg_out_insn(s, RIL, AFI, dest, val);
    }
}

static inline void tgen64_addi(TCGContext *s, TCGReg dest, int64_t val)
{
    if (val == (int16_t)val) {
        tcg_out_insn(s, RI, AGHI, dest, val);
    } else if (val == (int32_t)val) {
        tcg_out_insn(s, RIL, AGFI, dest, val);
    } else if (val == (uint32_t)val) {
        tcg_out_insn(s, RIL, ALGFI, dest, val);
    } else {
        tcg_abort();
    }
}

static void tgen64_andi(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };

    int i;

    /* Look for no-op.  */
    if (val == -1) {
        return;
    }

    /* Look for the zero-extensions.  */
    if (val == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }

    if (facilities & FACILITY_EXT_IMM) {
        if (val == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if (val == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }

        /* Try all 32-bit insns that can perform it in one go.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = ~(0xffffull << i*16);
            if ((val & mask) == mask) {
                tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
                return;
            }
        }

        /* Try all 48-bit insns that can perform it in one go.  */
        if (facilities & FACILITY_EXT_IMM) {
            for (i = 0; i < 2; i++) {
                tcg_target_ulong mask = ~(0xffffffffull << i*32);
                if ((val & mask) == mask) {
                    tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                    return;
                }
            }
        }

        /* Perform the AND via sequential modifications to the high and low
           parts.  Do this via recursion to handle 16-bit vs 32-bit masks in
           each half.  */
        tgen64_andi(s, dest, val | 0xffffffff00000000ull);
        tgen64_andi(s, dest, val | 0x00000000ffffffffull);
    } else {
        /* With no extended-immediate facility, just emit the sequence.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = 0xffffull << i*16;
            if ((val & mask) != mask) {
                tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            }
        }
    }
}

static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    static const S390Opcode oi_insns[4] = {
        RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
    };
    static const S390Opcode oif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    /* Look for no-op.  */
    if (val == 0) {
        return;
    }

    if (facilities & FACILITY_EXT_IMM) {
        /* Try all 32-bit insns that can perform it in one go.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
                return;
            }
        }

        /* Try all 48-bit insns that can perform it in one go.  */
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = (0xffffffffull << i*32);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32);
                return;
            }
        }

        /* Perform the OR via sequential modifications to the high and
           low parts.  Do this via recursion to handle 16-bit vs 32-bit
           masks in each half.  */
        tgen64_ori(s, dest, val & 0x00000000ffffffffull);
        tgen64_ori(s, dest, val & 0xffffffff00000000ull);
    } else {
        /* With no extended-immediate facility, we don't need to be so
           clever.  Just iterate over the insns and mask in the constant.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
            }
        }
    }
}

static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    /* Perform the xor by parts.  */
    if (val & 0xffffffff) {
        tcg_out_insn(s, RIL, XILF, dest, val);
    }
    if (val > 0xffffffff) {
        tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
    }
}

static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, int c2const)
{
    bool is_unsigned = (c > TCG_COND_GT);
    if (c2const) {
        if (c2 == 0) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LTR, r1, r1);
            } else {
                tcg_out_insn(s, RRE, LTGR, r1, r1);
            }
            return tcg_cond_to_ltr_cond[c];
        } else {
            if (is_unsigned) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
                }
            } else {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
                }
            }
        }
    } else {
        if (is_unsigned) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CLR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CLGR, r1, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CGR, r1, c2);
            }
        }
    }
    return tcg_cond_to_s390_cond[c];
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond c,
                         TCGReg dest, TCGReg r1, TCGArg c2, int c2const)
{
    int cc = tgen_cmp(s, type, c, r1, c2, c2const);

    /* Emit: dest = 1; if (cc) goto over; dest = 0; over:.  The BRC
       offset is in halfwords from the start of the insn, so (4 + 4) >> 1
       skips the 4-byte BRC itself plus the 4-byte LGHI loading 0.  */
    tcg_out_movi(s, type, dest, 1);
    tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
    tcg_out_movi(s, type, dest, 0);
}

static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
{
    tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
    if (off > -0x8000 && off < 0x7fff) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}

static void tgen_branch(TCGContext *s, int cc, int labelno)
{
    TCGLabel *l = &s->labels[labelno];
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value);
    } else if (USE_LONG_BRANCHES) {
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2);
        s->code_ptr += 4;
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2);
        s->code_ptr += 2;
    }
}

static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, int labelno)
{
    TCGLabel *l = &s->labels[labelno];
    tcg_target_long off;

    if (l->has_value) {
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = ((int16_t *)s->code_ptr)[1];
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}

static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, int labelno)
{
    TCGLabel *l = &s->labels[labelno];
    tcg_target_long off;

    if (l->has_value) {
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = ((int16_t *)s->code_ptr)[1];
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, int labelno)
{
    int cc;

    if (facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = (c > TCG_COND_GT);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, labelno);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE.  */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
            return;
        }
    }

    cc = tgen_cmp(s, type, c, r1, c2, c2const);
    tgen_branch(s, cc, labelno);
}

static void tgen_calli(TCGContext *s, tcg_target_long dest)
{
    tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}

static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif
    switch (opc) {
    case LD_UINT8:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case LD_INT8:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;
    case LD_UINT16:
        if (bswap) {
            /* swapped unsigned halfword load with upper bits zeroed */
            tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
            tgen_ext16u(s, TCG_TYPE_I64, data, data);
        } else {
            tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        }
        break;
    case LD_INT16:
        if (bswap) {
            /* swapped sign-extended halfword load */
            tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
            tgen_ext16s(s, TCG_TYPE_I64, data, data);
        } else {
            tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        }
        break;
    case LD_UINT32:
        if (bswap) {
            /* swapped unsigned int load with upper bits zeroed */
            tcg_out_insn(s, RXY, LRV, data, base, index, disp);
            tgen_ext32u(s, data, data);
        } else {
            tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        }
        break;
    case LD_INT32:
        if (bswap) {
            /* swapped sign-extended int load */
            tcg_out_insn(s, RXY, LRV, data, base, index, disp);
            tgen_ext32s(s, data, data);
        } else {
            tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        }
        break;
    case LD_UINT64:
        if (bswap) {
            tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, LG, data, base, index, disp);
        }
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif
    switch (opc) {
    case LD_UINT8:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;
    case LD_UINT16:
        if (bswap) {
            tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        } else if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;
    case LD_UINT32:
        if (bswap) {
            tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        } else if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;
    case LD_UINT64:
        if (bswap) {
            tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STG, data, base, index, disp);
        }
        break;
    default:
        tcg_abort();
    }
}

#if defined(CONFIG_SOFTMMU)
static void tgen64_andi_tmp(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    if (tcg_match_andi(0, val)) {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, val);
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    } else {
        tgen64_andi(s, dest, val);
    }
}

static void tcg_prepare_qemu_ldst(TCGContext *s, TCGReg data_reg,
                                  TCGReg addr_reg, int mem_index, int opc,
                                  uint16_t **label2_ptr_p, int is_store)
{
    const TCGReg arg0 = TCG_REG_R2;
    const TCGReg arg1 = TCG_REG_R3;
    int s_bits = opc & 3;
    uint16_t *label1_ptr;
    tcg_target_long ofs;

    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, arg0, addr_reg);
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, arg0, addr_reg);
    }

    tcg_out_sh64(s, RSY_SRLG, arg1, addr_reg, TCG_REG_NONE,
                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen64_andi_tmp(s, arg0, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tgen64_andi_tmp(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    if (is_store) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    }
    assert(ofs < 0x80000);

    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, arg0, arg1, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, arg0, arg1, TCG_AREG0, ofs);
    }

    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, arg0, addr_reg);
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, arg0, addr_reg);
    }

    label1_ptr = (uint16_t *)s->code_ptr;

    /* branch on equal to label1 (offset will be patched in later) */
    tcg_out_insn(s, RI, BRC, S390_CC_EQ, 0);

    /* call load/store helper */
    if (is_store) {
        /* Make sure to zero-extend the value to the full register
           for the calling convention.  */
        switch (opc) {
        case LD_UINT8:
            tgen_ext8u(s, TCG_TYPE_I64, arg1, data_reg);
            break;
        case LD_UINT16:
            tgen_ext16u(s, TCG_TYPE_I64, arg1, data_reg);
            break;
        case LD_UINT32:
            tgen_ext32u(s, arg1, data_reg);
            break;
        case LD_UINT64:
            tcg_out_mov(s, TCG_TYPE_I64, arg1, data_reg);
            break;
        default:
            tcg_abort();
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, mem_index);
#ifdef CONFIG_TCG_PASS_AREG0
        /* XXX/FIXME: suboptimal */
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
                    tcg_target_call_iarg_regs[2]);
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
                    tcg_target_call_iarg_regs[1]);
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
                    tcg_target_call_iarg_regs[0]);
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
                    TCG_AREG0);
#endif
        tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
#ifdef CONFIG_TCG_PASS_AREG0
        /* XXX/FIXME: suboptimal */
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
                    tcg_target_call_iarg_regs[1]);
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
                    tcg_target_call_iarg_regs[0]);
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
                    TCG_AREG0);
#endif
        tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);

        /* sign extension */
        switch (opc) {
        case LD_INT8:
            tgen_ext8s(s, TCG_TYPE_I64, data_reg, arg0);
            break;
        case LD_INT16:
            tgen_ext16s(s, TCG_TYPE_I64, data_reg, arg0);
            break;
        case LD_INT32:
            tgen_ext32s(s, data_reg, arg0);
            break;
        default:
            /* unsigned -> just copy */
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, arg0);
            break;
        }
    }

    /* jump to label2 (end) */
    *label2_ptr_p = (uint16_t *)s->code_ptr;

    tcg_out_insn(s, RI, BRC, S390_CC_ALWAYS, 0);

    /* this is label1, patch branch */
    *(label1_ptr + 1) = ((unsigned long)s->code_ptr -
                         (unsigned long)label1_ptr) >> 1;

    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    assert(ofs < 0x80000);

    tcg_out_mem(s, 0, RXY_AG, arg0, arg1, TCG_AREG0, ofs);
}
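
/* In outline, the sequence emitted above is (a sketch, not verbatim):
 *      srlg    %r3, addr, (PAGE_BITS - TLB_ENTRY_BITS)
 *      <mask %r2 to the page, %r3 to the TLB index>
 *      cg      %r2, addr_read/addr_write(%r3, env)
 *      brc     eq, label1              # TLB hit
 *      <call the ld/st helper>
 *      brc     always, label2          # skip the direct access
 * label1:
 *      ag      %r2, addend(%r3, env)   # host address
 * label2 follows the direct access and is patched by
 * tcg_finish_qemu_ldst below.  */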

static void tcg_finish_qemu_ldst(TCGContext *s, uint16_t *label2_ptr)
{
    /* patch branch */
    *(label2_ptr + 1) = ((unsigned long)s->code_ptr -
                         (unsigned long)label2_ptr) >> 1;
}
#else
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    if (GUEST_BASE < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = GUEST_BASE;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
#endif /* CONFIG_SOFTMMU */

/* load data with address translation (if applicable)
   and endianness conversion */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    TCGReg addr_reg, data_reg;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    uint16_t *label2_ptr;
#else
    TCGReg index_reg;
    tcg_target_long disp;
#endif

    data_reg = *args++;
    addr_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    mem_index = *args;

    tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
                          opc, &label2_ptr, 0);

    tcg_out_qemu_ld_direct(s, opc, data_reg, TCG_REG_R2, TCG_REG_NONE, 0);

    tcg_finish_qemu_ldst(s, label2_ptr);
#else
    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    TCGReg addr_reg, data_reg;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    uint16_t *label2_ptr;
#else
    TCGReg index_reg;
    tcg_target_long disp;
#endif

    data_reg = *args++;
    addr_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    mem_index = *args;

    tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
                          opc, &label2_ptr, 1);

    tcg_out_qemu_st_direct(s, opc, data_reg, TCG_REG_R2, TCG_REG_NONE, 0);

    tcg_finish_qemu_ldst(s, label2_ptr);
#else
    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}

#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
#else
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32)
#endif
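
/* E.g. OP_32_64(ld8u) expands to
       case INDEX_op_ld8u_i32:
       case INDEX_op_ld8u_i64:
   on a 64-bit host, and to only the _i32 case label otherwise.  */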

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    S390Opcode op;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* return value */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
        tgen_gotoi(s, S390_CC_ALWAYS, (unsigned long)tb_ret_addr);
        break;

    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            tcg_abort();
        } else {
            /* load address stored at s->tb_next + args[0] */
            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
            /* and go there */
            tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tgen_calli(s, args[0]);
        } else {
            tcg_out_insn(s, RR, BASR, TCG_REG_R14, args[0]);
        }
        break;

    case INDEX_op_mov_i32:
        tcg_out_mov(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;

    OP_32_64(ld8u):
        /* ??? LLC (RXY format) is only present with the extended-immediate
           facility, whereas LLGC is always present.  */
        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld8s):
        /* ??? LB is no smaller than LGB, so no point to using it.  */
        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld16u):
        /* ??? LLH (RXY format) is only present with the extended-immediate
           facility, whereas LLGH is always present.  */
        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld16s_i32:
        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    OP_32_64(st16):
        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tgen32_addi(s, args[0], args[2]);
        } else {
            tcg_out_insn(s, RR, AR, args[0], args[2]);
        }
        break;
    case INDEX_op_sub_i32:
        if (const_args[2]) {
            tgen32_addi(s, args[0], -args[2]);
        } else {
            tcg_out_insn(s, RR, SR, args[0], args[2]);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tgen64_andi(s, args[0], args[2] | 0xffffffff00000000ull);
        } else {
            tcg_out_insn(s, RR, NR, args[0], args[2]);
        }
        break;
    case INDEX_op_or_i32:
        if (const_args[2]) {
            tgen64_ori(s, args[0], args[2] & 0xffffffff);
        } else {
            tcg_out_insn(s, RR, OR, args[0], args[2]);
        }
        break;
    case INDEX_op_xor_i32:
        if (const_args[2]) {
            tgen64_xori(s, args[0], args[2] & 0xffffffff);
        } else {
            tcg_out_insn(s, RR, XR, args[0], args[2]);
        }
        break;

    case INDEX_op_neg_i32:
        tcg_out_insn(s, RR, LCR, args[0], args[1]);
        break;

    case INDEX_op_mul_i32:
        if (const_args[2]) {
            if ((int32_t)args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i32:
        tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
        break;

    case INDEX_op_shl_i32:
        op = RS_SLL;
    do_shift32:
        if (const_args[2]) {
            tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh32(s, op, args[0], args[2], 0);
        }
        break;
    case INDEX_op_shr_i32:
        op = RS_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        op = RS_SRA;
        goto do_shift32;

    case INDEX_op_rotl_i32:
        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol.  */
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
                         TCG_REG_NONE, (32 - args[2]) & 31);
        } else {
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i32:
        tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
        tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
        break;

    OP_32_64(bswap16):
        /* The TCG bswap definition requires bits 0-47 already be zero.
           Thus we don't need the G-type insns to implement bswap16_i64.  */
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
        break;
    OP_32_64(bswap32):
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        break;

    case INDEX_op_br:
        tgen_branch(s, S390_CC_ALWAYS, args[0]);
        break;

    case INDEX_op_brcond_i32:
        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
                    args[1], const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i32:
        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, LD_UINT8);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, LD_INT8);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, LD_UINT16);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, LD_INT16);
        break;
    case INDEX_op_qemu_ld32:
        /* ??? Technically we can use a non-extending instruction.  */
        tcg_out_qemu_ld(s, args, LD_UINT32);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, LD_UINT64);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, LD_UINT8);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, LD_UINT16);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, LD_UINT32);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, LD_UINT64);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_mov_i64:
        tcg_out_mov(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;

    case INDEX_op_ld16s_i64:
        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_st32_i64:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i64:
        if (const_args[2]) {
            tgen64_addi(s, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, AGR, args[0], args[2]);
        }
        break;
    case INDEX_op_sub_i64:
        if (const_args[2]) {
            tgen64_addi(s, args[0], -args[2]);
        } else {
            tcg_out_insn(s, RRE, SGR, args[0], args[2]);
        }
        break;

    case INDEX_op_and_i64:
        if (const_args[2]) {
            tgen64_andi(s, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, NGR, args[0], args[2]);
        }
        break;
    case INDEX_op_or_i64:
        if (const_args[2]) {
            tgen64_ori(s, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, OGR, args[0], args[2]);
        }
        break;
    case INDEX_op_xor_i64:
        if (const_args[2]) {
            tgen64_xori(s, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, XGR, args[0], args[2]);
        }
        break;

    case INDEX_op_neg_i64:
        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
        break;

    case INDEX_op_mul_i64:
        if (const_args[2]) {
            if (args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MGHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i64:
        /* ??? We get an unnecessary sign-extension of the dividend
           into R3 with this definition, but since we do in fact always
           produce both quotient and remainder, using INDEX_op_div_i64
           instead would require jumping through even more hoops.  */
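        /* DSGR treats R2:R3 as an even/odd pair: the dividend is taken
           from R3, the remainder is returned in R2 and the quotient in
           R3, matching the fixed "a"/"b" constraints in the table below.  */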
        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
        break;

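    /* The three 64-bit shifts share a single emission path; each case
       selects the RSY opcode in "op" before reaching the common code.  */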
    case INDEX_op_shl_i64:
        op = RSY_SLLG;
    do_shift64:
        if (const_args[2]) {
            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_shr_i64:
        op = RSY_SRLG;
        goto do_shift64;
    case INDEX_op_sar_i64:
        op = RSY_SRAG;
        goto do_shift64;

    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, (64 - args[2]) & 63);
        } else {
            /* We can use the smaller 32-bit negate because only the
               low 6 bits are examined for the rotate.  */
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i64:
        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tgen_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i64:
        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i64:
        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext32u_i64:
        tgen_ext32u(s, args[0], args[1]);
        break;

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;

    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, args, LD_UINT32);
        break;
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, LD_INT32);
        break;
#endif /* TCG_TARGET_REG_BITS == 64 */

    case INDEX_op_jmp:
        /* This one is obsolete and never emitted.  */
        tcg_abort();
        break;

    default:
        fprintf(stderr, "unimplemented opc 0x%x\n", opc);
        tcg_abort();
    }
}

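/* Operand constraints for each opcode.  "r" is any general register and
   "0"/"1" tie an input to the register of output 0/1.  The remaining
   letters are backend-specific and parsed by this file's constraint hook;
   from the uses below they appear to mean: "a"/"b" pin an operand to
   R2/R3 for the division pairs, "L" is a register safe to use around the
   qemu_ld/st helpers, "R" excludes R0 (a shift base of 0 means "no
   register"), and the uppercase immediate letters select the
   TCG_CT_CONST_* classes defined at the top of the file (I=ADDI, K=MULI,
   A=ANDI, O=ORI, X=XORI, C=CMPI, N=negatable, W=32-bit).  */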
static const TCGTargetOpDef s390_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "0", "rWI" } },
    { INDEX_op_sub_i32, { "r", "0", "rWNI" } },
    { INDEX_op_mul_i32, { "r", "0", "rK" } },

    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },

    { INDEX_op_and_i32, { "r", "0", "rWA" } },
    { INDEX_op_or_i32, { "r", "0", "rWO" } },
    { INDEX_op_xor_i32, { "r", "0", "rWX" } },

    { INDEX_op_neg_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "0", "Ri" } },
    { INDEX_op_shr_i32, { "r", "0", "Ri" } },
    { INDEX_op_sar_i32, { "r", "0", "Ri" } },

    { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "r", "rWC" } },
    { INDEX_op_setcond_i32, { "r", "r", "rWC" } },

    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },

#if defined(__s390x__)
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },

    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "0", "rI" } },
    { INDEX_op_sub_i64, { "r", "0", "rNI" } },
    { INDEX_op_mul_i64, { "r", "0", "rK" } },

    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },

    { INDEX_op_and_i64, { "r", "0", "rA" } },
    { INDEX_op_or_i64, { "r", "0", "rO" } },
    { INDEX_op_xor_i64, { "r", "0", "rX" } },

    { INDEX_op_neg_i64, { "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "Ri" } },
    { INDEX_op_shr_i64, { "r", "r", "Ri" } },
    { INDEX_op_sar_i64, { "r", "r", "Ri" } },

    { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_brcond_i64, { "r", "rC" } },
    { INDEX_op_setcond_i64, { "r", "r", "rC" } },

    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
#endif

    { -1 },
};

/* ??? Linux kernels provide an AUXV entry AT_HWCAP that provides most of
   this information.  However, getting at that entry is not easy this far
   away from main.  Our options are to start searching from environ (which
   fails as soon as someone does a setenv in between), to read the data
   from /proc/self/auxv, or to do the probing ourselves.  The only extra
   thing that AT_HWCAP gives us is HWCAP_S390_HIGH_GPRS, which indicates
   that the kernel saves all 64 bits of the registers around traps while
   in 31-bit mode.  But this is true of all "recent" kernels (we ought to
   dig back and see from when this might not be true).  */
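/* For illustration only: reading AT_HWCAP from /proc/self/auxv would look
   roughly like the sketch below (field names per <elf.h>; Elf32_auxv_t
   would be needed in 31-bit mode).  It is not wired in, since the SIGILL
   probing in query_facilities below covers everything we need.

   #include <elf.h>
   #include <fcntl.h>
   #include <unistd.h>

   static unsigned long read_at_hwcap(void)
   {
       Elf64_auxv_t auxv;
       unsigned long hwcap = 0;
       int fd = open("/proc/self/auxv", O_RDONLY);

       if (fd >= 0) {
           while (read(fd, &auxv, sizeof(auxv)) == sizeof(auxv)) {
               if (auxv.a_type == AT_HWCAP) {
                   hwcap = auxv.a_un.a_val;
                   break;
               }
           }
           close(fd);
       }
       return hwcap;
   }
*/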

#include <signal.h>

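/* Each probe below executes one instruction from the facility under
   test; if the CPU lacks that facility, the kernel delivers SIGILL and
   the handler records the fact.  */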
static volatile sig_atomic_t got_sigill;

static void sigill_handler(int sig)
{
    got_sigill = 1;
}

static void query_facilities(void)
{
    struct sigaction sa_old, sa_new;
    register int r0 __asm__("0");
    register void *r1 __asm__("1");
    int fail;

    memset(&sa_new, 0, sizeof(sa_new));
    sa_new.sa_handler = sigill_handler;
    sigaction(SIGILL, &sa_new, &sa_old);

    /* First, try STORE FACILITY LIST EXTENDED.  If this is present, then
       we need not do any more probing.  Unfortunately, this itself is an
       extension and the original STORE FACILITY LIST instruction is
       kernel-only, storing its results at absolute address 200.  */
    /* stfle 0(%r1) */
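    /* On input, r0 holds the number of doublewords to store, minus one;
       passing 0 requests a single doubleword of facility bits at 0(%r1).  */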
    r1 = &facilities;
    asm volatile(".word 0xb2b0,0x1000"
                 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");

    if (got_sigill) {
        /* STORE FACILITY LIST EXTENDED is not available.  Probe for one
           of each kind of instruction that we're interested in.  */
        /* ??? Possibly some of these are in practice never present unless
           the store-facility-list-extended facility is also present.  But
           since that isn't documented it's just better to probe for each.  */

        /* Test for z/Architecture.  Required even in 31-bit mode.  */
        got_sigill = 0;
        /* agr %r0,%r0 */
        asm volatile(".word 0xb908,0x0000" : "=r"(r0) : : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_ZARCH_ACTIVE;
        }

        /* Test for long displacement.  */
        got_sigill = 0;
        /* ly %r0,0(%r1) */
        r1 = &facilities;
        asm volatile(".word 0xe300,0x1000,0x0058"
                     : "=r"(r0) : "r"(r1) : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_LONG_DISP;
        }

        /* Test for extended immediates.  */
        got_sigill = 0;
        /* afi %r0,0 */
        asm volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_EXT_IMM;
        }

        /* Test for general-instructions-extension.  */
        got_sigill = 0;
        /* msfi %r0,1 */
        asm volatile(".word 0xc201,0x0000,0x0001");
        if (!got_sigill) {
            facilities |= FACILITY_GEN_INST_EXT;
        }
    }

    sigaction(SIGILL, &sa_old, NULL);

    /* The translator currently uses these extensions unconditionally.
       Pruning this back to the base ESA/390 architecture doesn't seem
       worthwhile, since even the KVM target requires z/Arch.  */
    fail = 0;
    if ((facilities & FACILITY_ZARCH_ACTIVE) == 0) {
        fprintf(stderr, "TCG: z/Arch facility is required.\n");
        fprintf(stderr, "TCG: Boot with a 64-bit enabled kernel.\n");
        fail = 1;
    }
    if ((facilities & FACILITY_LONG_DISP) == 0) {
        fprintf(stderr, "TCG: long-displacement facility is required.\n");
        fail = 1;
    }

    /* So far there's just enough support for 31-bit mode to let the
       compile succeed.  This is good enough to run QEMU with KVM.  */
    if (sizeof(void *) != 8) {
        fprintf(stderr, "TCG: 31-bit mode is not supported.\n");
        fail = 1;
    }

    if (fail) {
        exit(-1);
    }
}

static void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) {
        tcg_abort();
    }
#endif

    query_facilities();

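    /* All 16 general registers can hold either 32- or 64-bit values.  */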
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The return register can be considered call-clobbered.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so it's better to avoid
       it for now.  */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    tcg_add_target_add_op_defs(s390_op_defs);
    tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
}

static void tcg_target_qemu_prologue(TCGContext *s)
{
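    /* Standard s390x ELF ABI entry: callee-saved r6-r15 go into the
       caller-provided register save area at 48(%r15), then we allocate
       the 160-byte minimum stack frame of our own.  */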
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-160 (stack frame) */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -160);

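    /* 0x80000 is the first value that no longer fits the signed 20-bit
       displacement of the long-displacement memory formats, so larger
       guest bases are kept in a dedicated reserved register instead of
       being folded into each address.  */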
    if (GUEST_BASE >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    tb_ret_addr = s->code_ptr;

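    /* 208 = 160 (our frame) + 48 (offset of the save area within it),
       i.e. the slots written by the stmg above.  */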
    /* lmg %r6,%r15,208(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 208);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}