qemu.git: tcg/s390/tcg-target.c
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
27 /* We only support generating code for 64-bit mode. */
28 #if TCG_TARGET_REG_BITS != 64
29 #error "unsupported code generation mode"
30 #endif
31
32 /* ??? The translation blocks produced by TCG are generally small enough to
33 be entirely reachable with a 16-bit displacement. Leaving the option for
34 a 32-bit displacement here Just In Case. */
35 #define USE_LONG_BRANCHES 0
36
37 #define TCG_CT_CONST_32 0x0100
38 #define TCG_CT_CONST_MULI 0x0800
39 #define TCG_CT_CONST_ORI 0x2000
40 #define TCG_CT_CONST_XORI 0x4000
41 #define TCG_CT_CONST_CMPI 0x8000
42
43 /* In several places within the instruction set, 0 means "no register"
44 rather than TCG_REG_R0. */
45 #define TCG_REG_NONE 0
46
47 /* A scratch register that may be used throughout the backend. */
48 #define TCG_TMP0 TCG_REG_R14
49
50 #ifdef CONFIG_USE_GUEST_BASE
51 #define TCG_GUEST_BASE_REG TCG_REG_R13
52 #else
53 #define TCG_GUEST_BASE_REG TCG_REG_R0
54 #endif
55
56 #ifndef GUEST_BASE
57 #define GUEST_BASE 0
58 #endif
59
60
61 /* All of the following instructions are prefixed with their instruction
62 format, and are defined as 8- or 16-bit quantities, even when the two
63 halves of the 16-bit quantity may appear 32 bits apart in the insn.
64 This makes it easy to copy the values from the tables in Appendix B. */
65 typedef enum S390Opcode {
66 RIL_AFI = 0xc209,
67 RIL_AGFI = 0xc208,
68 RIL_ALFI = 0xc20b,
69 RIL_ALGFI = 0xc20a,
70 RIL_BRASL = 0xc005,
71 RIL_BRCL = 0xc004,
72 RIL_CFI = 0xc20d,
73 RIL_CGFI = 0xc20c,
74 RIL_CLFI = 0xc20f,
75 RIL_CLGFI = 0xc20e,
76 RIL_IIHF = 0xc008,
77 RIL_IILF = 0xc009,
78 RIL_LARL = 0xc000,
79 RIL_LGFI = 0xc001,
80 RIL_LGRL = 0xc408,
81 RIL_LLIHF = 0xc00e,
82 RIL_LLILF = 0xc00f,
83 RIL_LRL = 0xc40d,
84 RIL_MSFI = 0xc201,
85 RIL_MSGFI = 0xc200,
86 RIL_NIHF = 0xc00a,
87 RIL_NILF = 0xc00b,
88 RIL_OIHF = 0xc00c,
89 RIL_OILF = 0xc00d,
90 RIL_SLFI = 0xc205,
91 RIL_SLGFI = 0xc204,
92 RIL_XIHF = 0xc006,
93 RIL_XILF = 0xc007,
94
95 RI_AGHI = 0xa70b,
96 RI_AHI = 0xa70a,
97 RI_BRC = 0xa704,
98 RI_IIHH = 0xa500,
99 RI_IIHL = 0xa501,
100 RI_IILH = 0xa502,
101 RI_IILL = 0xa503,
102 RI_LGHI = 0xa709,
103 RI_LLIHH = 0xa50c,
104 RI_LLIHL = 0xa50d,
105 RI_LLILH = 0xa50e,
106 RI_LLILL = 0xa50f,
107 RI_MGHI = 0xa70d,
108 RI_MHI = 0xa70c,
109 RI_NIHH = 0xa504,
110 RI_NIHL = 0xa505,
111 RI_NILH = 0xa506,
112 RI_NILL = 0xa507,
113 RI_OIHH = 0xa508,
114 RI_OIHL = 0xa509,
115 RI_OILH = 0xa50a,
116 RI_OILL = 0xa50b,
117
118 RIE_CGIJ = 0xec7c,
119 RIE_CGRJ = 0xec64,
120 RIE_CIJ = 0xec7e,
121 RIE_CLGRJ = 0xec65,
122 RIE_CLIJ = 0xec7f,
123 RIE_CLGIJ = 0xec7d,
124 RIE_CLRJ = 0xec77,
125 RIE_CRJ = 0xec76,
126 RIE_RISBG = 0xec55,
127
128 RRE_AGR = 0xb908,
129 RRE_ALGR = 0xb90a,
130 RRE_ALCR = 0xb998,
131 RRE_ALCGR = 0xb988,
132 RRE_CGR = 0xb920,
133 RRE_CLGR = 0xb921,
134 RRE_DLGR = 0xb987,
135 RRE_DLR = 0xb997,
136 RRE_DSGFR = 0xb91d,
137 RRE_DSGR = 0xb90d,
138 RRE_LGBR = 0xb906,
139 RRE_LCGR = 0xb903,
140 RRE_LGFR = 0xb914,
141 RRE_LGHR = 0xb907,
142 RRE_LGR = 0xb904,
143 RRE_LLGCR = 0xb984,
144 RRE_LLGFR = 0xb916,
145 RRE_LLGHR = 0xb985,
146 RRE_LRVR = 0xb91f,
147 RRE_LRVGR = 0xb90f,
148 RRE_LTGR = 0xb902,
149 RRE_MLGR = 0xb986,
150 RRE_MSGR = 0xb90c,
151 RRE_MSR = 0xb252,
152 RRE_NGR = 0xb980,
153 RRE_OGR = 0xb981,
154 RRE_SGR = 0xb909,
155 RRE_SLGR = 0xb90b,
156 RRE_SLBR = 0xb999,
157 RRE_SLBGR = 0xb989,
158 RRE_XGR = 0xb982,
159
160 RRF_LOCR = 0xb9f2,
161 RRF_LOCGR = 0xb9e2,
162
163 RR_AR = 0x1a,
164 RR_ALR = 0x1e,
165 RR_BASR = 0x0d,
166 RR_BCR = 0x07,
167 RR_CLR = 0x15,
168 RR_CR = 0x19,
169 RR_DR = 0x1d,
170 RR_LCR = 0x13,
171 RR_LR = 0x18,
172 RR_LTR = 0x12,
173 RR_NR = 0x14,
174 RR_OR = 0x16,
175 RR_SR = 0x1b,
176 RR_SLR = 0x1f,
177 RR_XR = 0x17,
178
179 RSY_RLL = 0xeb1d,
180 RSY_RLLG = 0xeb1c,
181 RSY_SLLG = 0xeb0d,
182 RSY_SRAG = 0xeb0a,
183 RSY_SRLG = 0xeb0c,
184
185 RS_SLL = 0x89,
186 RS_SRA = 0x8a,
187 RS_SRL = 0x88,
188
189 RXY_AG = 0xe308,
190 RXY_AY = 0xe35a,
191 RXY_CG = 0xe320,
192 RXY_CY = 0xe359,
193 RXY_LAY = 0xe371,
194 RXY_LB = 0xe376,
195 RXY_LG = 0xe304,
196 RXY_LGB = 0xe377,
197 RXY_LGF = 0xe314,
198 RXY_LGH = 0xe315,
199 RXY_LHY = 0xe378,
200 RXY_LLGC = 0xe390,
201 RXY_LLGF = 0xe316,
202 RXY_LLGH = 0xe391,
203 RXY_LMG = 0xeb04,
204 RXY_LRV = 0xe31e,
205 RXY_LRVG = 0xe30f,
206 RXY_LRVH = 0xe31f,
207 RXY_LY = 0xe358,
208 RXY_STCY = 0xe372,
209 RXY_STG = 0xe324,
210 RXY_STHY = 0xe370,
211 RXY_STMG = 0xeb24,
212 RXY_STRV = 0xe33e,
213 RXY_STRVG = 0xe32f,
214 RXY_STRVH = 0xe33f,
215 RXY_STY = 0xe350,
216
217 RX_A = 0x5a,
218 RX_C = 0x59,
219 RX_L = 0x58,
220 RX_LA = 0x41,
221 RX_LH = 0x48,
222 RX_ST = 0x50,
223 RX_STC = 0x42,
224 RX_STH = 0x40,
225 } S390Opcode;
226
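/* Illustrative note (not part of the original source): the split encoding
   described above matters when emitting.  For a 6-byte RIE insn such as
   RIE_RISBG = 0xec55, the high byte 0xec goes in the first halfword and the
   low byte 0x55 in the last, exactly as tcg_out_risbg() below does:

       tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
       ...operand halfwords...
       tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));

   Keeping both bytes in one enum value lets the table above be copied
   directly from the Principles of Operation appendix. */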
227 #define LD_SIGNED 0x04
228 #define LD_UINT8 0x00
229 #define LD_INT8 (LD_UINT8 | LD_SIGNED)
230 #define LD_UINT16 0x01
231 #define LD_INT16 (LD_UINT16 | LD_SIGNED)
232 #define LD_UINT32 0x02
233 #define LD_INT32 (LD_UINT32 | LD_SIGNED)
234 #define LD_UINT64 0x03
235 #define LD_INT64 (LD_UINT64 | LD_SIGNED)
236
237 #ifndef NDEBUG
238 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
239 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
240 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
241 };
242 #endif
243
244 /* Since R6 is a potential argument register, choose it last of the
245 call-saved registers. Likewise prefer the call-clobbered registers
246 in reverse order to maximize the chance of avoiding the arguments. */
247 static const int tcg_target_reg_alloc_order[] = {
248 TCG_REG_R13,
249 TCG_REG_R12,
250 TCG_REG_R11,
251 TCG_REG_R10,
252 TCG_REG_R9,
253 TCG_REG_R8,
254 TCG_REG_R7,
255 TCG_REG_R6,
256 TCG_REG_R14,
257 TCG_REG_R0,
258 TCG_REG_R1,
259 TCG_REG_R5,
260 TCG_REG_R4,
261 TCG_REG_R3,
262 TCG_REG_R2,
263 };
264
265 static const int tcg_target_call_iarg_regs[] = {
266 TCG_REG_R2,
267 TCG_REG_R3,
268 TCG_REG_R4,
269 TCG_REG_R5,
270 TCG_REG_R6,
271 };
272
273 static const int tcg_target_call_oarg_regs[] = {
274 TCG_REG_R2,
275 };
276
277 #define S390_CC_EQ 8
278 #define S390_CC_LT 4
279 #define S390_CC_GT 2
280 #define S390_CC_OV 1
281 #define S390_CC_NE (S390_CC_LT | S390_CC_GT)
282 #define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
283 #define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
284 #define S390_CC_NEVER 0
285 #define S390_CC_ALWAYS 15
286
287 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
288 static const uint8_t tcg_cond_to_s390_cond[] = {
289 [TCG_COND_EQ] = S390_CC_EQ,
290 [TCG_COND_NE] = S390_CC_NE,
291 [TCG_COND_LT] = S390_CC_LT,
292 [TCG_COND_LE] = S390_CC_LE,
293 [TCG_COND_GT] = S390_CC_GT,
294 [TCG_COND_GE] = S390_CC_GE,
295 [TCG_COND_LTU] = S390_CC_LT,
296 [TCG_COND_LEU] = S390_CC_LE,
297 [TCG_COND_GTU] = S390_CC_GT,
298 [TCG_COND_GEU] = S390_CC_GE,
299 };
300
301 /* Condition codes that result from a LOAD AND TEST. Here we have no
302 unsigned instruction variant; however, since the test is against zero
303 we can re-map the outcomes appropriately. */
304 static const uint8_t tcg_cond_to_ltr_cond[] = {
305 [TCG_COND_EQ] = S390_CC_EQ,
306 [TCG_COND_NE] = S390_CC_NE,
307 [TCG_COND_LT] = S390_CC_LT,
308 [TCG_COND_LE] = S390_CC_LE,
309 [TCG_COND_GT] = S390_CC_GT,
310 [TCG_COND_GE] = S390_CC_GE,
311 [TCG_COND_LTU] = S390_CC_NEVER,
312 [TCG_COND_LEU] = S390_CC_EQ,
313 [TCG_COND_GTU] = S390_CC_NE,
314 [TCG_COND_GEU] = S390_CC_ALWAYS,
315 };
316
317 #ifdef CONFIG_SOFTMMU
318
319 #include "exec/softmmu_defs.h"
320
321 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
322 int mmu_idx) */
323 static const void * const qemu_ld_helpers[4] = {
324 helper_ldb_mmu,
325 helper_ldw_mmu,
326 helper_ldl_mmu,
327 helper_ldq_mmu,
328 };
329
330 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
331 uintxx_t val, int mmu_idx) */
332 static const void * const qemu_st_helpers[4] = {
333 helper_stb_mmu,
334 helper_stw_mmu,
335 helper_stl_mmu,
336 helper_stq_mmu,
337 };
338 #endif
339
340 static uint8_t *tb_ret_addr;
341
342 /* A list of relevant facilities used by this translator. Some of these
343 are required for proper operation, and these are checked at startup. */
344
345 #define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
346 #define FACILITY_LONG_DISP (1ULL << (63 - 18))
347 #define FACILITY_EXT_IMM (1ULL << (63 - 21))
348 #define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
349 #define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
350
351 static uint64_t facilities;
352
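/* A minimal sketch (an assumption, not from the original file) of how such
   a facility test reads: STORE FACILITY LIST numbers bits big-endian within
   the doubleword, so facility number N maps to 1ULL << (63 - N).  */
static inline bool have_facility(int nr)
{
    /* Only meaningful for nr < 64 with this single-doubleword cache. */
    return (facilities & (1ULL << (63 - nr))) != 0;
}
/* e.g. have_facility(21) is equivalent to (facilities & FACILITY_EXT_IMM). */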
353 static void patch_reloc(uint8_t *code_ptr, int type,
354 tcg_target_long value, tcg_target_long addend)
355 {
356 tcg_target_long code_ptr_tl = (tcg_target_long)code_ptr;
357 tcg_target_long pcrel2;
358
359 /* ??? Not the usual definition of "addend". */
360 pcrel2 = (value - (code_ptr_tl + addend)) >> 1;
361
362 switch (type) {
363 case R_390_PC16DBL:
364 assert(pcrel2 == (int16_t)pcrel2);
365 *(int16_t *)code_ptr = pcrel2;
366 break;
367 case R_390_PC32DBL:
368 assert(pcrel2 == (int32_t)pcrel2);
369 *(int32_t *)code_ptr = pcrel2;
370 break;
371 default:
372 tcg_abort();
373 break;
374 }
375 }
376
377 /* parse target specific constraints */
378 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
379 {
380 const char *ct_str = *pct_str;
381
382 switch (ct_str[0]) {
383 case 'r': /* all registers */
384 ct->ct |= TCG_CT_REG;
385 tcg_regset_set32(ct->u.regs, 0, 0xffff);
386 break;
387 case 'R': /* not R0 */
388 ct->ct |= TCG_CT_REG;
389 tcg_regset_set32(ct->u.regs, 0, 0xffff);
390 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
391 break;
392 case 'L': /* qemu_ld/st constraint */
393 ct->ct |= TCG_CT_REG;
394 tcg_regset_set32(ct->u.regs, 0, 0xffff);
395 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
396 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
397 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
398 break;
399 case 'a': /* force R2 for division */
400 ct->ct |= TCG_CT_REG;
401 tcg_regset_clear(ct->u.regs);
402 tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
403 break;
404 case 'b': /* force R3 for division */
405 ct->ct |= TCG_CT_REG;
406 tcg_regset_clear(ct->u.regs);
407 tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
408 break;
409 case 'W': /* force 32-bit ("word") immediate */
410 ct->ct |= TCG_CT_CONST_32;
411 break;
412 case 'K':
413 ct->ct |= TCG_CT_CONST_MULI;
414 break;
415 case 'O':
416 ct->ct |= TCG_CT_CONST_ORI;
417 break;
418 case 'X':
419 ct->ct |= TCG_CT_CONST_XORI;
420 break;
421 case 'C':
422 ct->ct |= TCG_CT_CONST_CMPI;
423 break;
424 default:
425 return -1;
426 }
427 ct_str++;
428 *pct_str = ct_str;
429
430 return 0;
431 }
432
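/* Worked example (illustrative): the constraint letter 'a' above yields a
   register set containing only R2, and 'b' only R3, matching the fixed
   R2/R3 register pair that the div2 ops below hard-code (e.g. DR is always
   emitted with TCG_REG_R2 as its first operand). */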
433 /* Immediates to be used with logical OR. This is an optimization only,
434 since a full 64-bit immediate OR can always be performed with 4 sequential
435 OI[LH][LH] instructions. What we're looking for is immediates that we
436 can load efficiently, and for which the immediate load plus the reg-reg
437 OR is smaller than the sequential OIs. */
438
439 static int tcg_match_ori(int ct, tcg_target_long val)
440 {
441 if (facilities & FACILITY_EXT_IMM) {
442 if (ct & TCG_CT_CONST_32) {
443 /* All 32-bit ORs can be performed with 1 48-bit insn. */
444 return 1;
445 }
446 }
447
448 /* Look for negative values. These are best to load with LGHI. */
449 if (val < 0) {
450 if (val == (int16_t)val) {
451 return 0;
452 }
453 if (facilities & FACILITY_EXT_IMM) {
454 if (val == (int32_t)val) {
455 return 0;
456 }
457 }
458 }
459
460 return 1;
461 }
462
463 /* Immediates to be used with logical XOR. This is almost, but not quite,
464 only an optimization. XOR with immediate is only supported with the
465 extended-immediate facility. That said, there are a few patterns for
466 which it is better to load the value into a register first. */
467
468 static int tcg_match_xori(int ct, tcg_target_long val)
469 {
470 if ((facilities & FACILITY_EXT_IMM) == 0) {
471 return 0;
472 }
473
474 if (ct & TCG_CT_CONST_32) {
475 /* All 32-bit XORs can be performed with 1 48-bit insn. */
476 return 1;
477 }
478
479 /* Look for negative values. These are best to load with LGHI. */
480 if (val < 0 && val == (int32_t)val) {
481 return 0;
482 }
483
484 return 1;
485 }
486
487 /* Immediates to be used with comparisons. */
488
489 static int tcg_match_cmpi(int ct, tcg_target_long val)
490 {
491 if (facilities & FACILITY_EXT_IMM) {
492 /* The COMPARE IMMEDIATE instruction is available. */
493 if (ct & TCG_CT_CONST_32) {
494 /* We have a 32-bit immediate and can compare against anything. */
495 return 1;
496 } else {
497 /* ??? We have no insight here into whether the comparison is
498 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
499 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
500 a 32-bit unsigned immediate. If we were to use the (semi)
501 obvious "val == (int32_t)val" we would be enabling unsigned
502 comparisons vs very large numbers. The only solution is to
503 take the intersection of the ranges. */
504 /* ??? Another possible solution is to simply lie and allow all
505 constants here and force the out-of-range values into a temp
506 register in tgen_cmp when we have knowledge of the actual
507 comparison code in use. */
508 return val >= 0 && val <= 0x7fffffff;
509 }
510 } else {
511 /* Only the LOAD AND TEST instruction is available. */
512 return val == 0;
513 }
514 }
515
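/* Worked example (illustrative): COMPARE IMMEDIATE (CFI/CGFI) accepts a
   signed 32-bit immediate, [-0x80000000, 0x7fffffff]; COMPARE LOGICAL
   IMMEDIATE (CLFI/CLGFI) accepts an unsigned one, [0, 0xffffffff].  Since
   the signedness of the eventual comparison is unknown here, only the
   intersection [0, 0x7fffffff] is accepted, which is exactly the final
   test above. */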
516 /* Test if a constant matches the constraint. */
517 static int tcg_target_const_match(tcg_target_long val,
518 const TCGArgConstraint *arg_ct)
519 {
520 int ct = arg_ct->ct;
521
522 if (ct & TCG_CT_CONST) {
523 return 1;
524 }
525
526 /* Handle the modifiers. */
527 if (ct & TCG_CT_CONST_32) {
528 val = (int32_t)val;
529 }
530
531 /* The following are mutually exclusive. */
532 if (ct & TCG_CT_CONST_MULI) {
533 /* Immediates that may be used with multiply. If we have the
534 general-instruction-extensions, then we have MULTIPLY SINGLE
535 IMMEDIATE with a signed 32-bit, otherwise we have only
536 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
537 if (facilities & FACILITY_GEN_INST_EXT) {
538 return val == (int32_t)val;
539 } else {
540 return val == (int16_t)val;
541 }
542 } else if (ct & TCG_CT_CONST_ORI) {
543 return tcg_match_ori(ct, val);
544 } else if (ct & TCG_CT_CONST_XORI) {
545 return tcg_match_xori(ct, val);
546 } else if (ct & TCG_CT_CONST_CMPI) {
547 return tcg_match_cmpi(ct, val);
548 }
549
550 return 0;
551 }
552
553 /* Emit instructions according to the given instruction format. */
554
555 static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
556 {
557 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
558 }
559
560 static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
561 TCGReg r1, TCGReg r2)
562 {
563 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
564 }
565
566 static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
567 TCGReg r1, TCGReg r2, int m3)
568 {
569 tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
570 }
571
572 static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
573 {
574 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
575 }
576
577 static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
578 {
579 tcg_out16(s, op | (r1 << 4));
580 tcg_out32(s, i2);
581 }
582
583 static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
584 TCGReg b2, TCGReg r3, int disp)
585 {
586 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
587 | (disp & 0xfff));
588 }
589
590 static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
591 TCGReg b2, TCGReg r3, int disp)
592 {
593 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
594 tcg_out32(s, (op & 0xff) | (b2 << 28)
595 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
596 }
597
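/* Worked example (illustrative): the RSY displacement is a signed 20-bit
   value split into DL (low 12 bits, placed at bits 16..27 of the second
   word) and DH (high 8 bits, at bits 8..15).  disp = 0x12345 encodes as
   DL = 0x345, DH = 0x12; disp = -8 (0xffff8 in 20 bits) encodes as
   DL = 0xff8, DH = 0xff. */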
598 #define tcg_out_insn_RX tcg_out_insn_RS
599 #define tcg_out_insn_RXY tcg_out_insn_RSY
600
601 /* Emit an opcode with "type-checking" of the format. */
602 #define tcg_out_insn(S, FMT, OP, ...) \
603 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
604
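/* Example expansion (illustrative): tcg_out_insn(s, RIL, LARL, ret, off)
   becomes tcg_out_insn_RIL(s, RIL_LARL, ret, off).  The FMT argument both
   selects the emitter function and prefixes the opcode name, so pairing an
   opcode with the wrong format fails to compile. */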
605
606 /* emit 64-bit shifts */
607 static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
608 TCGReg src, TCGReg sh_reg, int sh_imm)
609 {
610 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
611 }
612
613 /* emit 32-bit shifts */
614 static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
615 TCGReg sh_reg, int sh_imm)
616 {
617 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
618 }
619
620 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
621 {
622 if (src != dst) {
623 if (type == TCG_TYPE_I32) {
624 tcg_out_insn(s, RR, LR, dst, src);
625 } else {
626 tcg_out_insn(s, RRE, LGR, dst, src);
627 }
628 }
629 }
630
631 /* load a register with an immediate value */
632 static void tcg_out_movi(TCGContext *s, TCGType type,
633 TCGReg ret, tcg_target_long sval)
634 {
635 static const S390Opcode lli_insns[4] = {
636 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
637 };
638
639 tcg_target_ulong uval = sval;
640 int i;
641
642 if (type == TCG_TYPE_I32) {
643 uval = (uint32_t)sval;
644 sval = (int32_t)sval;
645 }
646
647 /* Try all 32-bit insns that can load it in one go. */
648 if (sval >= -0x8000 && sval < 0x8000) {
649 tcg_out_insn(s, RI, LGHI, ret, sval);
650 return;
651 }
652
653 for (i = 0; i < 4; i++) {
654 tcg_target_long mask = 0xffffull << i*16;
655 if ((uval & mask) == uval) {
656 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
657 return;
658 }
659 }
660
661 /* Try all 48-bit insns that can load it in one go. */
662 if (facilities & FACILITY_EXT_IMM) {
663 if (sval == (int32_t)sval) {
664 tcg_out_insn(s, RIL, LGFI, ret, sval);
665 return;
666 }
667 if (uval <= 0xffffffff) {
668 tcg_out_insn(s, RIL, LLILF, ret, uval);
669 return;
670 }
671 if ((uval & 0xffffffff) == 0) {
672 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
673 return;
674 }
675 }
676
677 /* Try for PC-relative address load. */
678 if ((sval & 1) == 0) {
679 intptr_t off = (sval - (intptr_t)s->code_ptr) >> 1;
680 if (off == (int32_t)off) {
681 tcg_out_insn(s, RIL, LARL, ret, off);
682 return;
683 }
684 }
685
686 /* If extended immediates are not present, then we may have to issue
687 several instructions to load the low 32 bits. */
688 if (!(facilities & FACILITY_EXT_IMM)) {
689 /* A 32-bit unsigned value can be loaded in 2 insns. And given
690 that the lli_insns loop above did not succeed, we know that
691 both insns are required. */
692 if (uval <= 0xffffffff) {
693 tcg_out_insn(s, RI, LLILL, ret, uval);
694 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
695 return;
696 }
697
698 /* If all the high bits are set, the value can be loaded in 2 or 3 insns.
699 We first want to make sure that all the high bits get set. With
700 luck, the low 16 bits can be considered negative to perform that for
701 free; otherwise we load an explicit -1. */
702 if (sval >> 31 >> 1 == -1) {
703 if (uval & 0x8000) {
704 tcg_out_insn(s, RI, LGHI, ret, uval);
705 } else {
706 tcg_out_insn(s, RI, LGHI, ret, -1);
707 tcg_out_insn(s, RI, IILL, ret, uval);
708 }
709 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
710 return;
711 }
712 }
713
714 /* If we get here, both the high and low parts have non-zero bits. */
715
716 /* Recurse to load the lower 32-bits. */
717 tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);
718
719 /* Insert data into the high 32-bits. */
720 uval = uval >> 31 >> 1;
721 if (facilities & FACILITY_EXT_IMM) {
722 if (uval < 0x10000) {
723 tcg_out_insn(s, RI, IIHL, ret, uval);
724 } else if ((uval & 0xffff) == 0) {
725 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
726 } else {
727 tcg_out_insn(s, RIL, IIHF, ret, uval);
728 }
729 } else {
730 if (uval & 0xffff) {
731 tcg_out_insn(s, RI, IIHL, ret, uval);
732 }
733 if (uval & 0xffff0000) {
734 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
735 }
736 }
737 }
738
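/* Worked examples (illustrative) of the selection logic above:
       0x0000000000001234 -> LGHI (16-bit signed immediate)
       0x0000000012340000 -> LLILH (a single non-zero halfword)
       0x0000000012345678 -> LGFI with extended-immediate; otherwise
                             (barring the PC-relative LARL special case)
                             LLILL 0x5678 + IILH 0x1234
       0x1234567800000000 -> LLIHF with extended-immediate, otherwise the
                             recursive low/high combination at the end. */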
739
740 /* Emit a load/store type instruction. Inputs are:
741 DATA: The register to be loaded or stored.
742 BASE+OFS: The effective address.
743 OPC_RX: The RX format opcode for the operation, if it has one (e.g. STC); otherwise 0.
744 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
745
746 static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
747 TCGReg data, TCGReg base, TCGReg index,
748 tcg_target_long ofs)
749 {
750 if (ofs < -0x80000 || ofs >= 0x80000) {
751 /* Combine the low 20 bits of the offset with the actual load insn;
752 the high 44 bits must come from an immediate load. */
753 tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
754 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
755 ofs = low;
756
757 /* If we were already given an index register, add it in. */
758 if (index != TCG_REG_NONE) {
759 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
760 }
761 index = TCG_TMP0;
762 }
763
764 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
765 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
766 } else {
767 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
768 }
769 }
770
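/* Worked example (illustrative): for ofs = 0x87654, which exceeds the
   signed 20-bit range, the split above sign-extends the low 20 bits to
   low = -0x789ac and loads TCG_TMP0 with ofs - low = 0x100000, so that
   TMP0 + low == ofs while both pieces remain encodable. */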
771
772 /* load data without address translation or endianness conversion */
773 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
774 TCGReg base, tcg_target_long ofs)
775 {
776 if (type == TCG_TYPE_I32) {
777 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
778 } else {
779 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
780 }
781 }
782
783 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
784 TCGReg base, tcg_target_long ofs)
785 {
786 if (type == TCG_TYPE_I32) {
787 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
788 } else {
789 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
790 }
791 }
792
793 /* load data from an absolute host address */
794 static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
795 {
796 tcg_target_long addr = (tcg_target_long)abs;
797
798 if (facilities & FACILITY_GEN_INST_EXT) {
799 tcg_target_long disp = (addr - (tcg_target_long)s->code_ptr) >> 1;
800 if (disp == (int32_t)disp) {
801 if (type == TCG_TYPE_I32) {
802 tcg_out_insn(s, RIL, LRL, dest, disp);
803 } else {
804 tcg_out_insn(s, RIL, LGRL, dest, disp);
805 }
806 return;
807 }
808 }
809
810 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
811 tcg_out_ld(s, type, dest, dest, addr & 0xffff);
812 }
813
814 static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
815 int msb, int lsb, int ofs, int z)
816 {
817 /* Format RIE-f */
818 tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
819 tcg_out16(s, (msb << 8) | (z << 7) | lsb);
820 tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
821 }
822
823 static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
824 {
825 if (facilities & FACILITY_EXT_IMM) {
826 tcg_out_insn(s, RRE, LGBR, dest, src);
827 return;
828 }
829
830 if (type == TCG_TYPE_I32) {
831 if (dest == src) {
832 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
833 } else {
834 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
835 }
836 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
837 } else {
838 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
839 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
840 }
841 }
842
843 static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
844 {
845 if (facilities & FACILITY_EXT_IMM) {
846 tcg_out_insn(s, RRE, LLGCR, dest, src);
847 return;
848 }
849
850 if (dest == src) {
851 tcg_out_movi(s, type, TCG_TMP0, 0xff);
852 src = TCG_TMP0;
853 } else {
854 tcg_out_movi(s, type, dest, 0xff);
855 }
856 if (type == TCG_TYPE_I32) {
857 tcg_out_insn(s, RR, NR, dest, src);
858 } else {
859 tcg_out_insn(s, RRE, NGR, dest, src);
860 }
861 }
862
863 static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
864 {
865 if (facilities & FACILITY_EXT_IMM) {
866 tcg_out_insn(s, RRE, LGHR, dest, src);
867 return;
868 }
869
870 if (type == TCG_TYPE_I32) {
871 if (dest == src) {
872 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
873 } else {
874 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
875 }
876 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
877 } else {
878 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
879 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
880 }
881 }
882
883 static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
884 {
885 if (facilities & FACILITY_EXT_IMM) {
886 tcg_out_insn(s, RRE, LLGHR, dest, src);
887 return;
888 }
889
890 if (dest == src) {
891 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
892 src = TCG_TMP0;
893 } else {
894 tcg_out_movi(s, type, dest, 0xffff);
895 }
896 if (type == TCG_TYPE_I32) {
897 tcg_out_insn(s, RR, NR, dest, src);
898 } else {
899 tcg_out_insn(s, RRE, NGR, dest, src);
900 }
901 }
902
903 static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
904 {
905 tcg_out_insn(s, RRE, LGFR, dest, src);
906 }
907
908 static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
909 {
910 tcg_out_insn(s, RRE, LLGFR, dest, src);
911 }
912
913 /* Accept bit patterns like these:
914 0....01....1
915 1....10....0
916 1..10..01..1
917 0..01..10..0
918 Copied from gcc sources. */
919 static inline bool risbg_mask(uint64_t c)
920 {
921 uint64_t lsb;
922 /* We don't change the number of transitions by inverting,
923 so make sure we start with the LSB zero. */
924 if (c & 1) {
925 c = ~c;
926 }
927 /* Reject all zeros or all ones. */
928 if (c == 0) {
929 return false;
930 }
931 /* Find the first transition. */
932 lsb = c & -c;
933 /* Invert to look for a second transition. */
934 c = ~c;
935 /* Erase the first transition. */
936 c &= -lsb;
937 /* Find the second transition, if any. */
938 lsb = c & -c;
939 /* Match if all the bits are 1's, or if c is zero. */
940 return c == -lsb;
941 }
942
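/* Worked example (illustrative): c = 0x0ff0 is accepted -- the first
   transition is at bit 4 (lsb = 0x10); after inverting and erasing it,
   c = 0xfffffffffffff000, and the remaining bits equal -lsb, so no
   further transition exists.  c = 0xa (binary 1010) is rejected: the
   residue 0xfffffffffffffff4 is not -lsb (0xfffffffffffffffc), betraying
   a second 0/1 boundary. */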
943 static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
944 {
945 static const S390Opcode ni_insns[4] = {
946 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
947 };
948 static const S390Opcode nif_insns[2] = {
949 RIL_NILF, RIL_NIHF
950 };
951 uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
952 int i;
953
954 /* Look for the zero-extensions. */
955 if ((val & valid) == 0xffffffff) {
956 tgen_ext32u(s, dest, dest);
957 return;
958 }
959 if (facilities & FACILITY_EXT_IMM) {
960 if ((val & valid) == 0xff) {
961 tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
962 return;
963 }
964 if ((val & valid) == 0xffff) {
965 tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
966 return;
967 }
968 }
969
970 /* Try all 32-bit insns that can perform it in one go. */
971 for (i = 0; i < 4; i++) {
972 tcg_target_ulong mask = ~(0xffffull << i*16);
973 if (((val | ~valid) & mask) == mask) {
974 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
975 return;
976 }
977 }
978
979 /* Try all 48-bit insns that can perform it in one go. */
980 if (facilities & FACILITY_EXT_IMM) {
981 for (i = 0; i < 2; i++) {
982 tcg_target_ulong mask = ~(0xffffffffull << i*32);
983 if (((val | ~valid) & mask) == mask) {
984 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
985 return;
986 }
987 }
988 }
989 if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
990 int msb, lsb;
991 if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
992 /* Achieve wraparound by swapping msb and lsb. */
993 msb = 64 - ctz64(~val);
994 lsb = clz64(~val) - 1;
995 } else {
996 msb = clz64(val);
997 lsb = 63 - ctz64(val);
998 }
999 tcg_out_risbg(s, dest, dest, msb, lsb, 0, 1);
1000 return;
1001 }
1002
1003 /* Fall back to loading the constant. */
1004 tcg_out_movi(s, type, TCG_TMP0, val);
1005 if (type == TCG_TYPE_I32) {
1006 tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
1007 } else {
1008 tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
1009 }
1010 }
1011
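/* Worked examples (illustrative): val = 0x0000000000ffff00 gives
   msb = clz64(val) = 40 and lsb = 63 - ctz64(val) = 55, i.e. RISBG keeps
   big-endian bits 40..55 (LE bits 8..23) and zeroes the rest (z = 1).
   A wraparound mask such as 0xf00000000000000f takes the swapped path:
   msb = 64 - ctz64(~val) = 60, lsb = clz64(~val) - 1 = 3, selecting
   BE bits 60..63 together with 0..3. */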
1012 static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1013 {
1014 static const S390Opcode oi_insns[4] = {
1015 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1016 };
1017 static const S390Opcode oif_insns[2] = {
1018 RIL_OILF, RIL_OIHF
1019 };
1020
1021 int i;
1022
1023 /* Look for no-op. */
1024 if (val == 0) {
1025 return;
1026 }
1027
1028 if (facilities & FACILITY_EXT_IMM) {
1029 /* Try all 32-bit insns that can perform it in one go. */
1030 for (i = 0; i < 4; i++) {
1031 tcg_target_ulong mask = (0xffffull << i*16);
1032 if ((val & mask) != 0 && (val & ~mask) == 0) {
1033 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1034 return;
1035 }
1036 }
1037
1038 /* Try all 48-bit insns that can perform it in one go. */
1039 for (i = 0; i < 2; i++) {
1040 tcg_target_ulong mask = (0xffffffffull << i*32);
1041 if ((val & mask) != 0 && (val & ~mask) == 0) {
1042 tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32);
1043 return;
1044 }
1045 }
1046
1047 /* Perform the OR via sequential modifications to the high and
1048 low parts. Do this via recursion to handle 16-bit vs 32-bit
1049 masks in each half. */
1050 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1051 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1052 } else {
1053 /* With no extended-immediate facility, we don't need to be so
1054 clever. Just iterate over the insns and mask in the constant. */
1055 for (i = 0; i < 4; i++) {
1056 tcg_target_ulong mask = (0xffffull << i*16);
1057 if ((val & mask) != 0) {
1058 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1059 }
1060 }
1061 }
1062 }
1063
1064 static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1065 {
1066 /* Perform the xor by parts. */
1067 if (val & 0xffffffff) {
1068 tcg_out_insn(s, RIL, XILF, dest, val);
1069 }
1070 if (val > 0xffffffff) {
1071 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1072 }
1073 }
1074
1075 static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1076 TCGArg c2, int c2const)
1077 {
1078 bool is_unsigned = is_unsigned_cond(c);
1079 if (c2const) {
1080 if (c2 == 0) {
1081 if (type == TCG_TYPE_I32) {
1082 tcg_out_insn(s, RR, LTR, r1, r1);
1083 } else {
1084 tcg_out_insn(s, RRE, LTGR, r1, r1);
1085 }
1086 return tcg_cond_to_ltr_cond[c];
1087 } else {
1088 if (is_unsigned) {
1089 if (type == TCG_TYPE_I32) {
1090 tcg_out_insn(s, RIL, CLFI, r1, c2);
1091 } else {
1092 tcg_out_insn(s, RIL, CLGFI, r1, c2);
1093 }
1094 } else {
1095 if (type == TCG_TYPE_I32) {
1096 tcg_out_insn(s, RIL, CFI, r1, c2);
1097 } else {
1098 tcg_out_insn(s, RIL, CGFI, r1, c2);
1099 }
1100 }
1101 }
1102 } else {
1103 if (is_unsigned) {
1104 if (type == TCG_TYPE_I32) {
1105 tcg_out_insn(s, RR, CLR, r1, c2);
1106 } else {
1107 tcg_out_insn(s, RRE, CLGR, r1, c2);
1108 }
1109 } else {
1110 if (type == TCG_TYPE_I32) {
1111 tcg_out_insn(s, RR, CR, r1, c2);
1112 } else {
1113 tcg_out_insn(s, RRE, CGR, r1, c2);
1114 }
1115 }
1116 }
1117 return tcg_cond_to_s390_cond[c];
1118 }
1119
1120 static void tgen_setcond(TCGContext *s, TCGType type, TCGCond c,
1121 TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
1122 {
1123 int cc = tgen_cmp(s, type, c, c1, c2, c2const);
1124
1125 /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over: */
1126 tcg_out_movi(s, type, dest, 1);
1127 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1128 tcg_out_movi(s, type, dest, 0);
1129 }
1130
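/* Note on the branch distance above (illustrative): BRC occupies 4 bytes
   and the LGHI emitted by tcg_out_movi for the constant 0 another 4, so
   "over" lies 8 bytes = 4 halfwords past the BRC, hence the immediate
   (4 + 4) >> 1.  RI-format branch offsets count halfwords from the start
   of the branch insn. */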
1131 static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1132 TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
1133 {
1134 int cc;
1135 if (facilities & FACILITY_LOAD_ON_COND) {
1136 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1137 tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
1138 } else {
1139 c = tcg_invert_cond(c);
1140 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1141
1142 /* Emit: if (cc) goto over; dest = r3; over: */
1143 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1144 tcg_out_insn(s, RRE, LGR, dest, r3);
1145 }
1146 }
1147
1148 bool tcg_target_deposit_valid(int ofs, int len)
1149 {
1150 return (facilities & FACILITY_GEN_INST_EXT) != 0;
1151 }
1152
1153 static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1154 int ofs, int len)
1155 {
1156 int lsb = (63 - ofs);
1157 int msb = lsb - (len - 1);
1158 tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
1159 }
1160
1161 static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
1162 {
1163 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1164 if (off > -0x8000 && off < 0x7fff) {
1165 tcg_out_insn(s, RI, BRC, cc, off);
1166 } else if (off == (int32_t)off) {
1167 tcg_out_insn(s, RIL, BRCL, cc, off);
1168 } else {
1169 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1170 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1171 }
1172 }
1173
1174 static void tgen_branch(TCGContext *s, int cc, int labelno)
1175 {
1176 TCGLabel* l = &s->labels[labelno];
1177 if (l->has_value) {
1178 tgen_gotoi(s, cc, l->u.value);
1179 } else if (USE_LONG_BRANCHES) {
1180 tcg_out16(s, RIL_BRCL | (cc << 4));
1181 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2);
1182 s->code_ptr += 4;
1183 } else {
1184 tcg_out16(s, RI_BRC | (cc << 4));
1185 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2);
1186 s->code_ptr += 2;
1187 }
1188 }
1189
1190 static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
1191 TCGReg r1, TCGReg r2, int labelno)
1192 {
1193 TCGLabel* l = &s->labels[labelno];
1194 tcg_target_long off;
1195
1196 if (l->has_value) {
1197 off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
1198 } else {
1199 /* We need to keep the offset unchanged for retranslation. */
1200 off = ((int16_t *)s->code_ptr)[1];
1201 tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
1202 }
1203
1204 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1205 tcg_out16(s, off);
1206 tcg_out16(s, cc << 12 | (opc & 0xff));
1207 }
1208
1209 static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
1210 TCGReg r1, int i2, int labelno)
1211 {
1212 TCGLabel* l = &s->labels[labelno];
1213 tcg_target_long off;
1214
1215 if (l->has_value) {
1216 off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
1217 } else {
1218 /* We need to keep the offset unchanged for retranslation. */
1219 off = ((int16_t *)s->code_ptr)[1];
1220 tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
1221 }
1222
1223 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1224 tcg_out16(s, off);
1225 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1226 }
1227
1228 static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1229 TCGReg r1, TCGArg c2, int c2const, int labelno)
1230 {
1231 int cc;
1232
1233 if (facilities & FACILITY_GEN_INST_EXT) {
1234 bool is_unsigned = is_unsigned_cond(c);
1235 bool in_range;
1236 S390Opcode opc;
1237
1238 cc = tcg_cond_to_s390_cond[c];
1239
1240 if (!c2const) {
1241 opc = (type == TCG_TYPE_I32
1242 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1243 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
1244 tgen_compare_branch(s, opc, cc, r1, c2, labelno);
1245 return;
1246 }
1247
1248 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1249 If the immediate we've been given does not fit that range, we'll
1250 fall back to separate compare and branch instructions using the
1251 larger comparison range afforded by COMPARE IMMEDIATE. */
1252 if (type == TCG_TYPE_I32) {
1253 if (is_unsigned) {
1254 opc = RIE_CLIJ;
1255 in_range = (uint32_t)c2 == (uint8_t)c2;
1256 } else {
1257 opc = RIE_CIJ;
1258 in_range = (int32_t)c2 == (int8_t)c2;
1259 }
1260 } else {
1261 if (is_unsigned) {
1262 opc = RIE_CLGIJ;
1263 in_range = (uint64_t)c2 == (uint8_t)c2;
1264 } else {
1265 opc = RIE_CGIJ;
1266 in_range = (int64_t)c2 == (int8_t)c2;
1267 }
1268 }
1269 if (in_range) {
1270 tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
1271 return;
1272 }
1273 }
1274
1275 cc = tgen_cmp(s, type, c, r1, c2, c2const);
1276 tgen_branch(s, cc, labelno);
1277 }
1278
1279 static void tgen_calli(TCGContext *s, tcg_target_long dest)
1280 {
1281 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1282 if (off == (int32_t)off) {
1283 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1284 } else {
1285 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1286 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1287 }
1288 }
1289
1290 static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data,
1291 TCGReg base, TCGReg index, int disp)
1292 {
1293 #ifdef TARGET_WORDS_BIGENDIAN
1294 const int bswap = 0;
1295 #else
1296 const int bswap = 1;
1297 #endif
1298 switch (opc) {
1299 case LD_UINT8:
1300 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1301 break;
1302 case LD_INT8:
1303 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1304 break;
1305 case LD_UINT16:
1306 if (bswap) {
1307 /* swapped unsigned halfword load with upper bits zeroed */
1308 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1309 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1310 } else {
1311 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1312 }
1313 break;
1314 case LD_INT16:
1315 if (bswap) {
1316 /* swapped sign-extended halfword load */
1317 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1318 tgen_ext16s(s, TCG_TYPE_I64, data, data);
1319 } else {
1320 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1321 }
1322 break;
1323 case LD_UINT32:
1324 if (bswap) {
1325 /* swapped unsigned int load with upper bits zeroed */
1326 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1327 tgen_ext32u(s, data, data);
1328 } else {
1329 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1330 }
1331 break;
1332 case LD_INT32:
1333 if (bswap) {
1334 /* swapped sign-extended int load */
1335 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1336 tgen_ext32s(s, data, data);
1337 } else {
1338 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1339 }
1340 break;
1341 case LD_UINT64:
1342 if (bswap) {
1343 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
1344 } else {
1345 tcg_out_insn(s, RXY, LG, data, base, index, disp);
1346 }
1347 break;
1348 default:
1349 tcg_abort();
1350 }
1351 }
1352
1353 static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data,
1354 TCGReg base, TCGReg index, int disp)
1355 {
1356 #ifdef TARGET_WORDS_BIGENDIAN
1357 const int bswap = 0;
1358 #else
1359 const int bswap = 1;
1360 #endif
1361 switch (opc) {
1362 case LD_UINT8:
1363 if (disp >= 0 && disp < 0x1000) {
1364 tcg_out_insn(s, RX, STC, data, base, index, disp);
1365 } else {
1366 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1367 }
1368 break;
1369 case LD_UINT16:
1370 if (bswap) {
1371 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1372 } else if (disp >= 0 && disp < 0x1000) {
1373 tcg_out_insn(s, RX, STH, data, base, index, disp);
1374 } else {
1375 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1376 }
1377 break;
1378 case LD_UINT32:
1379 if (bswap) {
1380 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1381 } else if (disp >= 0 && disp < 0x1000) {
1382 tcg_out_insn(s, RX, ST, data, base, index, disp);
1383 } else {
1384 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1385 }
1386 break;
1387 case LD_UINT64:
1388 if (bswap) {
1389 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1390 } else {
1391 tcg_out_insn(s, RXY, STG, data, base, index, disp);
1392 }
1393 break;
1394 default:
1395 tcg_abort();
1396 }
1397 }
1398
1399 #if defined(CONFIG_SOFTMMU)
1400 static TCGReg tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
1401 TCGReg addr_reg, int mem_index, int opc,
1402 uint16_t **label2_ptr_p, int is_store)
1403 {
1404 const TCGReg arg0 = tcg_target_call_iarg_regs[0];
1405 const TCGReg arg1 = tcg_target_call_iarg_regs[1];
1406 const TCGReg arg2 = tcg_target_call_iarg_regs[2];
1407 const TCGReg arg3 = tcg_target_call_iarg_regs[3];
1408 int s_bits = opc & 3;
1409 uint16_t *label1_ptr;
1410 tcg_target_long ofs;
1411
1412 if (TARGET_LONG_BITS == 32) {
1413 tgen_ext32u(s, arg1, addr_reg);
1414 } else {
1415 tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
1416 }
1417
1418 tcg_out_sh64(s, RSY_SRLG, arg2, addr_reg, TCG_REG_NONE,
1419 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1420
1421 tgen_andi(s, TCG_TYPE_I64, arg1, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
1422 tgen_andi(s, TCG_TYPE_I64, arg2, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
1423
1424 if (is_store) {
1425 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
1426 } else {
1427 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
1428 }
1429 assert(ofs < 0x80000);
1430
1431 if (TARGET_LONG_BITS == 32) {
1432 tcg_out_mem(s, RX_C, RXY_CY, arg1, arg2, TCG_AREG0, ofs);
1433 } else {
1434 tcg_out_mem(s, 0, RXY_CG, arg1, arg2, TCG_AREG0, ofs);
1435 }
1436
1437 if (TARGET_LONG_BITS == 32) {
1438 tgen_ext32u(s, arg1, addr_reg);
1439 } else {
1440 tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
1441 }
1442
1443 label1_ptr = (uint16_t*)s->code_ptr;
1444
1445 /* je label1 (offset will be patched in later) */
1446 tcg_out_insn(s, RI, BRC, S390_CC_EQ, 0);
1447
1448 /* call load/store helper */
1449 if (is_store) {
1450 /* Make sure to zero-extend the value to the full register
1451 for the calling convention. */
1452 switch (opc) {
1453 case LD_UINT8:
1454 tgen_ext8u(s, TCG_TYPE_I64, arg2, data_reg);
1455 break;
1456 case LD_UINT16:
1457 tgen_ext16u(s, TCG_TYPE_I64, arg2, data_reg);
1458 break;
1459 case LD_UINT32:
1460 tgen_ext32u(s, arg2, data_reg);
1461 break;
1462 case LD_UINT64:
1463 tcg_out_mov(s, TCG_TYPE_I64, arg2, data_reg);
1464 break;
1465 default:
1466 tcg_abort();
1467 }
1468 tcg_out_movi(s, TCG_TYPE_I32, arg3, mem_index);
1469 tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
1470 tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
1471 } else {
1472 tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
1473 tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
1474 tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);
1475
1476 /* sign extension */
1477 switch (opc) {
1478 case LD_INT8:
1479 tgen_ext8s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
1480 break;
1481 case LD_INT16:
1482 tgen_ext16s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
1483 break;
1484 case LD_INT32:
1485 tgen_ext32s(s, data_reg, TCG_REG_R2);
1486 break;
1487 default:
1488 /* unsigned -> just copy */
1489 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
1490 break;
1491 }
1492 }
1493
1494 /* jump to label2 (end) */
1495 *label2_ptr_p = (uint16_t*)s->code_ptr;
1496
1497 tcg_out_insn(s, RI, BRC, S390_CC_ALWAYS, 0);
1498
1499 /* this is label1, patch branch */
1500 *(label1_ptr + 1) = ((unsigned long)s->code_ptr -
1501 (unsigned long)label1_ptr) >> 1;
1502
1503 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1504 assert(ofs < 0x80000);
1505
1506 tcg_out_mem(s, 0, RXY_AG, arg1, arg2, TCG_AREG0, ofs);
1507
1508 return arg1;
1509 }
1510
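/* A minimal C model (an assumption, for illustration only) of the TLB
   addressing arithmetic emitted above: after the SRLG and the two ANDs,
   arg2 holds the byte offset of the TLB entry and arg1 the page (plus
   alignment) bits of the address to compare against it.

       static uintptr_t tlb_entry_ofs(uint64_t addr)
       {
           return (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
                  & ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
       }

   Shifting by TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS leaves the index
   pre-scaled by the entry size, so a single AND both masks the index and
   yields a displacement usable directly in the CY/CG compare. */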
1511 static void tcg_finish_qemu_ldst(TCGContext* s, uint16_t *label2_ptr)
1512 {
1513 /* patch branch */
1514 *(label2_ptr + 1) = ((unsigned long)s->code_ptr -
1515 (unsigned long)label2_ptr) >> 1;
1516 }
1517 #else
1518 static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1519 TCGReg *index_reg, tcg_target_long *disp)
1520 {
1521 if (TARGET_LONG_BITS == 32) {
1522 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1523 *addr_reg = TCG_TMP0;
1524 }
1525 if (GUEST_BASE < 0x80000) {
1526 *index_reg = TCG_REG_NONE;
1527 *disp = GUEST_BASE;
1528 } else {
1529 *index_reg = TCG_GUEST_BASE_REG;
1530 *disp = 0;
1531 }
1532 }
1533 #endif /* CONFIG_SOFTMMU */
1534
1535 /* load data with address translation (if applicable)
1536 and endianness conversion */
1537 static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
1538 {
1539 TCGReg addr_reg, data_reg;
1540 #if defined(CONFIG_SOFTMMU)
1541 int mem_index;
1542 uint16_t *label2_ptr;
1543 #else
1544 TCGReg index_reg;
1545 tcg_target_long disp;
1546 #endif
1547
1548 data_reg = *args++;
1549 addr_reg = *args++;
1550
1551 #if defined(CONFIG_SOFTMMU)
1552 mem_index = *args;
1553
1554 addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1555 opc, &label2_ptr, 0);
1556
1557 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);
1558
1559 tcg_finish_qemu_ldst(s, label2_ptr);
1560 #else
1561 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1562 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1563 #endif
1564 }
1565
1566 static void tcg_out_qemu_st(TCGContext* s, const TCGArg* args, int opc)
1567 {
1568 TCGReg addr_reg, data_reg;
1569 #if defined(CONFIG_SOFTMMU)
1570 int mem_index;
1571 uint16_t *label2_ptr;
1572 #else
1573 TCGReg index_reg;
1574 tcg_target_long disp;
1575 #endif
1576
1577 data_reg = *args++;
1578 addr_reg = *args++;
1579
1580 #if defined(CONFIG_SOFTMMU)
1581 mem_index = *args;
1582
1583 addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1584 opc, &label2_ptr, 1);
1585
1586 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);
1587
1588 tcg_finish_qemu_ldst(s, label2_ptr);
1589 #else
1590 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1591 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1592 #endif
1593 }
1594
1595 # define OP_32_64(x) \
1596 case glue(glue(INDEX_op_,x),_i32): \
1597 case glue(glue(INDEX_op_,x),_i64)
1598
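/* Example expansion (illustrative): OP_32_64(ld8u) yields the pair of
   labels "case INDEX_op_ld8u_i32: case INDEX_op_ld8u_i64:", so one body
   below serves both operand widths. */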
1599 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1600 const TCGArg *args, const int *const_args)
1601 {
1602 S390Opcode op;
1603 TCGArg a0, a1, a2;
1604
1605 switch (opc) {
1606 case INDEX_op_exit_tb:
1607 /* return value */
1608 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
1609 tgen_gotoi(s, S390_CC_ALWAYS, (unsigned long)tb_ret_addr);
1610 break;
1611
1612 case INDEX_op_goto_tb:
1613 if (s->tb_jmp_offset) {
1614 tcg_abort();
1615 } else {
1616 /* load address stored at s->tb_next + args[0] */
1617 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
1618 /* and go there */
1619 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1620 }
1621 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1622 break;
1623
1624 case INDEX_op_call:
1625 if (const_args[0]) {
1626 tgen_calli(s, args[0]);
1627 } else {
1628 tcg_out_insn(s, RR, BASR, TCG_REG_R14, args[0]);
1629 }
1630 break;
1631
1632 case INDEX_op_mov_i32:
1633 tcg_out_mov(s, TCG_TYPE_I32, args[0], args[1]);
1634 break;
1635 case INDEX_op_movi_i32:
1636 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1637 break;
1638
1639 OP_32_64(ld8u):
1640 /* ??? LLC (RXY format) is only present with the extended-immediate
1641 facility, whereas LLGC is always present. */
1642 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1643 break;
1644
1645 OP_32_64(ld8s):
1646 /* ??? LB is no smaller than LGB, so there is no point in using it. */
1647 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1648 break;
1649
1650 OP_32_64(ld16u):
1651 /* ??? LLH (RXY format) is only present with the extended-immediate
1652 facility, whereas LLGH is always present. */
1653 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1654 break;
1655
1656 case INDEX_op_ld16s_i32:
1657 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1658 break;
1659
1660 case INDEX_op_ld_i32:
1661 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1662 break;
1663
1664 OP_32_64(st8):
1665 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1666 TCG_REG_NONE, args[2]);
1667 break;
1668
1669 OP_32_64(st16):
1670 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1671 TCG_REG_NONE, args[2]);
1672 break;
1673
1674 case INDEX_op_st_i32:
1675 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1676 break;
1677
1678 case INDEX_op_add_i32:
1679 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1680 if (const_args[2]) {
1681 do_addi_32:
1682 if (a0 == a1) {
1683 if (a2 == (int16_t)a2) {
1684 tcg_out_insn(s, RI, AHI, a0, a2);
1685 break;
1686 }
1687 if (facilities & FACILITY_EXT_IMM) {
1688 tcg_out_insn(s, RIL, AFI, a0, a2);
1689 break;
1690 }
1691 }
1692 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1693 } else if (a0 == a1) {
1694 tcg_out_insn(s, RR, AR, a0, a2);
1695 } else {
1696 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1697 }
1698 break;
1699 case INDEX_op_sub_i32:
1700 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1701 if (const_args[2]) {
1702 a2 = -a2;
1703 goto do_addi_32;
1704 }
1705 tcg_out_insn(s, RR, SR, args[0], args[2]);
1706 break;
1707
1708 case INDEX_op_and_i32:
1709 if (const_args[2]) {
1710 tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
1711 } else {
1712 tcg_out_insn(s, RR, NR, args[0], args[2]);
1713 }
1714 break;
1715 case INDEX_op_or_i32:
1716 if (const_args[2]) {
1717 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1718 } else {
1719 tcg_out_insn(s, RR, OR, args[0], args[2]);
1720 }
1721 break;
1722 case INDEX_op_xor_i32:
1723 if (const_args[2]) {
1724 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1725 } else {
1726 tcg_out_insn(s, RR, XR, args[0], args[2]);
1727 }
1728 break;
1729
1730 case INDEX_op_neg_i32:
1731 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1732 break;
1733
1734 case INDEX_op_mul_i32:
1735 if (const_args[2]) {
1736 if ((int32_t)args[2] == (int16_t)args[2]) {
1737 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1738 } else {
1739 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1740 }
1741 } else {
1742 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1743 }
1744 break;
1745
1746 case INDEX_op_div2_i32:
1747 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1748 break;
1749 case INDEX_op_divu2_i32:
1750 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1751 break;
1752
1753 case INDEX_op_shl_i32:
1754 op = RS_SLL;
1755 do_shift32:
1756 if (const_args[2]) {
1757 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1758 } else {
1759 tcg_out_sh32(s, op, args[0], args[2], 0);
1760 }
1761 break;
1762 case INDEX_op_shr_i32:
1763 op = RS_SRL;
1764 goto do_shift32;
1765 case INDEX_op_sar_i32:
1766 op = RS_SRA;
1767 goto do_shift32;
1768
1769 case INDEX_op_rotl_i32:
1770 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1771 if (const_args[2]) {
1772 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1773 } else {
1774 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1775 }
1776 break;
1777 case INDEX_op_rotr_i32:
1778 if (const_args[2]) {
1779 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1780 TCG_REG_NONE, (32 - args[2]) & 31);
1781 } else {
1782 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1783 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1784 }
1785 break;
1786
1787 case INDEX_op_ext8s_i32:
1788 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1789 break;
1790 case INDEX_op_ext16s_i32:
1791 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1792 break;
1793 case INDEX_op_ext8u_i32:
1794 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1795 break;
1796 case INDEX_op_ext16u_i32:
1797 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1798 break;
1799
1800 OP_32_64(bswap16):
1801 /* The TCG bswap definition requires bits 0-47 already be zero.
1802 Thus we don't need the G-type insns to implement bswap16_i64. */
1803 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1804 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1805 break;
1806 OP_32_64(bswap32):
1807 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1808 break;
1809
1810 case INDEX_op_add2_i32:
1811 /* ??? Make use of ALFI. */
1812 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1813 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1814 break;
1815 case INDEX_op_sub2_i32:
1816 /* ??? Make use of SLFI. */
1817 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1818 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1819 break;
1820
1821 case INDEX_op_br:
1822 tgen_branch(s, S390_CC_ALWAYS, args[0]);
1823 break;
1824
1825 case INDEX_op_brcond_i32:
1826 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
1827 args[1], const_args[1], args[3]);
1828 break;
1829 case INDEX_op_setcond_i32:
1830 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1831 args[2], const_args[2]);
1832 break;
1833 case INDEX_op_movcond_i32:
1834 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
1835 args[2], const_args[2], args[3]);
1836 break;
1837
1838 case INDEX_op_qemu_ld8u:
1839 tcg_out_qemu_ld(s, args, LD_UINT8);
1840 break;
1841 case INDEX_op_qemu_ld8s:
1842 tcg_out_qemu_ld(s, args, LD_INT8);
1843 break;
1844 case INDEX_op_qemu_ld16u:
1845 tcg_out_qemu_ld(s, args, LD_UINT16);
1846 break;
1847 case INDEX_op_qemu_ld16s:
1848 tcg_out_qemu_ld(s, args, LD_INT16);
1849 break;
1850 case INDEX_op_qemu_ld32:
1851 /* ??? Technically we can use a non-extending instruction. */
1852 tcg_out_qemu_ld(s, args, LD_UINT32);
1853 break;
1854 case INDEX_op_qemu_ld64:
1855 tcg_out_qemu_ld(s, args, LD_UINT64);
1856 break;
1857
1858 case INDEX_op_qemu_st8:
1859 tcg_out_qemu_st(s, args, LD_UINT8);
1860 break;
1861 case INDEX_op_qemu_st16:
1862 tcg_out_qemu_st(s, args, LD_UINT16);
1863 break;
1864 case INDEX_op_qemu_st32:
1865 tcg_out_qemu_st(s, args, LD_UINT32);
1866 break;
1867 case INDEX_op_qemu_st64:
1868 tcg_out_qemu_st(s, args, LD_UINT64);
1869 break;
1870
1871 case INDEX_op_mov_i64:
1872 tcg_out_mov(s, TCG_TYPE_I64, args[0], args[1]);
1873 break;
1874 case INDEX_op_movi_i64:
1875 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1876 break;
1877
1878 case INDEX_op_ld16s_i64:
1879 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1880 break;
1881 case INDEX_op_ld32u_i64:
1882 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1883 break;
1884 case INDEX_op_ld32s_i64:
1885 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1886 break;
1887 case INDEX_op_ld_i64:
1888 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1889 break;
1890
1891 case INDEX_op_st32_i64:
1892 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1893 break;
1894 case INDEX_op_st_i64:
1895 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1896 break;
1897
1898 case INDEX_op_add_i64:
1899 a0 = args[0], a1 = args[1], a2 = args[2];
1900 if (const_args[2]) {
1901 do_addi_64:
1902 if (a0 == a1) {
1903 if (a2 == (int16_t)a2) {
1904 tcg_out_insn(s, RI, AGHI, a0, a2);
1905 break;
1906 }
1907 if (facilities & FACILITY_EXT_IMM) {
1908 if (a2 == (int32_t)a2) {
1909 tcg_out_insn(s, RIL, AGFI, a0, a2);
1910 break;
1911 } else if (a2 == (uint32_t)a2) {
1912 tcg_out_insn(s, RIL, ALGFI, a0, a2);
1913 break;
1914 } else if (-a2 == (uint32_t)-a2) {
1915 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
1916 break;
1917 }
1918 }
1919 }
1920 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1921 } else if (a0 == a1) {
1922 tcg_out_insn(s, RRE, AGR, a0, a2);
1923 } else {
1924 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1925 }
1926 break;
1927 case INDEX_op_sub_i64:
1928 a0 = args[0], a1 = args[1], a2 = args[2];
1929 if (const_args[2]) {
1930 a2 = -a2;
1931 goto do_addi_64;
1932 } else {
1933 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
1934 }
1935 break;
1936
1937 case INDEX_op_and_i64:
1938 if (const_args[2]) {
1939 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
1940 } else {
1941 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
1942 }
1943 break;
1944 case INDEX_op_or_i64:
1945 if (const_args[2]) {
1946 tgen64_ori(s, args[0], args[2]);
1947 } else {
1948 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
1949 }
1950 break;
1951 case INDEX_op_xor_i64:
1952 if (const_args[2]) {
1953 tgen64_xori(s, args[0], args[2]);
1954 } else {
1955 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
1956 }
1957 break;
1958
1959 case INDEX_op_neg_i64:
1960 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
1961 break;
1962 case INDEX_op_bswap64_i64:
1963 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
1964 break;
1965
1966 case INDEX_op_mul_i64:
1967 if (const_args[2]) {
1968 if (args[2] == (int16_t)args[2]) {
1969 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
1970 } else {
1971 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
1972 }
1973 } else {
1974 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
1975 }
1976 break;
1977
1978 case INDEX_op_div2_i64:
1979 /* ??? We get an unnecessary sign-extension of the dividend
1980 into R3 with this definition; but since we do in fact always
1981 produce both quotient and remainder, using INDEX_op_div_i64
1982 instead would require jumping through even more hoops. */
1983 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
1984 break;
1985 case INDEX_op_divu2_i64:
1986 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
1987 break;
1988 case INDEX_op_mulu2_i64:
1989 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
1990 break;
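/* A note on the hard-coded R2 above: per the z/Architecture Principles
   of Operation, DSGR, DLGR and MLGR all operate on an even/odd register
   pair.  DLGR takes its 128-bit dividend from the pair (DSGR only the
   64-bit value in the odd register), leaving the remainder in the even
   register and the quotient in the odd one; MLGR likewise spreads its
   128-bit product across the pair.  The "a"/"b" and "0"/"1" constraints
   in s390_op_defs below pin the TCG arguments to exactly R2/R3. */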
1991
1992 case INDEX_op_shl_i64:
1993 op = RSY_SLLG;
1994 do_shift64:
1995 if (const_args[2]) {
1996 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
1997 } else {
1998 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
1999 }
2000 break;
2001 case INDEX_op_shr_i64:
2002 op = RSY_SRLG;
2003 goto do_shift64;
2004 case INDEX_op_sar_i64:
2005 op = RSY_SRAG;
2006 goto do_shift64;
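/* The 64-bit RSY shifts (SLLG/SRLG/SRAG) are three-operand and leave
   the source register intact, so no matching constraint is needed; the
   32-bit RS forms shift in place, which is why the i32 shift entries in
   s390_op_defs below tie the output to the first input with "0". */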
2007
2008 case INDEX_op_rotl_i64:
2009 if (const_args[2]) {
2010 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2011 TCG_REG_NONE, args[2]);
2012 } else {
2013 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2014 }
2015 break;
2016 case INDEX_op_rotr_i64:
2017 if (const_args[2]) {
2018 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2019 TCG_REG_NONE, (64 - args[2]) & 63);
2020 } else {
2021 /* We can use the smaller 32-bit negate because only the
2022 low 6 bits are examined for the rotate. */
2023 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2024 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2025 }
2026 break;
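/* z/Arch provides only a rotate-left instruction, so a constant rotate
   right by N becomes a rotate left by (64 - N) & 63, e.g. rotr by 8 is
   emitted as rllg with count 56; for register counts, the negation
   above works because RLLG uses only the low 6 bits of the count. */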
2027
2028 case INDEX_op_ext8s_i64:
2029 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2030 break;
2031 case INDEX_op_ext16s_i64:
2032 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2033 break;
2034 case INDEX_op_ext32s_i64:
2035 tgen_ext32s(s, args[0], args[1]);
2036 break;
2037 case INDEX_op_ext8u_i64:
2038 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2039 break;
2040 case INDEX_op_ext16u_i64:
2041 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2042 break;
2043 case INDEX_op_ext32u_i64:
2044 tgen_ext32u(s, args[0], args[1]);
2045 break;
2046
2047 case INDEX_op_add2_i64:
2048 /* ??? Make use of ALGFI and SLGFI. */
2049 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2050 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2051 break;
2052 case INDEX_op_sub2_i64:
2053 /* ??? Make use of ALGFI and SLGFI. */
2054 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2055 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2056 break;
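/* These pairs form carry chains: ALGR sets the condition code to record
   a carry-out, which ALCGR (add logical with carry) folds into the high
   half; SLGR and SLBGR (subtract logical with borrow) do the same for
   the borrow in sub2. */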
2057
2058 case INDEX_op_brcond_i64:
2059 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2060 args[1], const_args[1], args[3]);
2061 break;
2062 case INDEX_op_setcond_i64:
2063 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2064 args[2], const_args[2]);
2065 break;
2066 case INDEX_op_movcond_i64:
2067 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2068 args[2], const_args[2], args[3]);
2069 break;
2070
2071 case INDEX_op_qemu_ld32u:
2072 tcg_out_qemu_ld(s, args, LD_UINT32);
2073 break;
2074 case INDEX_op_qemu_ld32s:
2075 tcg_out_qemu_ld(s, args, LD_INT32);
2076 break;
2077
2078 OP_32_64(deposit):
2079 tgen_deposit(s, args[0], args[2], args[3], args[4]);
2080 break;
2081
2082 default:
2083 fprintf(stderr, "unimplemented opc 0x%x\n", opc);
2084 tcg_abort();
2085 }
2086 }
2087
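/* A rough legend for the constraint letters used below; the
   target_parse_constraint code earlier in this file is authoritative.
   "r" is any general register, "R" any register except R0, "L" the
   qemu_ld/st subset that avoids the helper-call argument registers,
   "a"/"b" force R2/R3 for the register-pair ops, a digit ties an input
   to the same register as that output, and "W", "K", "O", "X", "C"
   select the TCG_CT_CONST_32/MULI/ORI/XORI/CMPI immediate classes. */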
2088 static const TCGTargetOpDef s390_op_defs[] = {
2089 { INDEX_op_exit_tb, { } },
2090 { INDEX_op_goto_tb, { } },
2091 { INDEX_op_call, { "ri" } },
2092 { INDEX_op_br, { } },
2093
2094 { INDEX_op_mov_i32, { "r", "r" } },
2095 { INDEX_op_movi_i32, { "r" } },
2096
2097 { INDEX_op_ld8u_i32, { "r", "r" } },
2098 { INDEX_op_ld8s_i32, { "r", "r" } },
2099 { INDEX_op_ld16u_i32, { "r", "r" } },
2100 { INDEX_op_ld16s_i32, { "r", "r" } },
2101 { INDEX_op_ld_i32, { "r", "r" } },
2102 { INDEX_op_st8_i32, { "r", "r" } },
2103 { INDEX_op_st16_i32, { "r", "r" } },
2104 { INDEX_op_st_i32, { "r", "r" } },
2105
2106 { INDEX_op_add_i32, { "r", "r", "ri" } },
2107 { INDEX_op_sub_i32, { "r", "0", "ri" } },
2108 { INDEX_op_mul_i32, { "r", "0", "rK" } },
2109
2110 { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
2111 { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
2112
2113 { INDEX_op_and_i32, { "r", "0", "ri" } },
2114 { INDEX_op_or_i32, { "r", "0", "rWO" } },
2115 { INDEX_op_xor_i32, { "r", "0", "rWX" } },
2116
2117 { INDEX_op_neg_i32, { "r", "r" } },
2118
2119 { INDEX_op_shl_i32, { "r", "0", "Ri" } },
2120 { INDEX_op_shr_i32, { "r", "0", "Ri" } },
2121 { INDEX_op_sar_i32, { "r", "0", "Ri" } },
2122
2123 { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
2124 { INDEX_op_rotr_i32, { "r", "r", "Ri" } },
2125
2126 { INDEX_op_ext8s_i32, { "r", "r" } },
2127 { INDEX_op_ext8u_i32, { "r", "r" } },
2128 { INDEX_op_ext16s_i32, { "r", "r" } },
2129 { INDEX_op_ext16u_i32, { "r", "r" } },
2130
2131 { INDEX_op_bswap16_i32, { "r", "r" } },
2132 { INDEX_op_bswap32_i32, { "r", "r" } },
2133
2134 { INDEX_op_add2_i32, { "r", "r", "0", "1", "r", "r" } },
2135 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "r", "r" } },
2136
2137 { INDEX_op_brcond_i32, { "r", "rWC" } },
2138 { INDEX_op_setcond_i32, { "r", "r", "rWC" } },
2139 { INDEX_op_movcond_i32, { "r", "r", "rWC", "r", "0" } },
2140 { INDEX_op_deposit_i32, { "r", "0", "r" } },
2141
2142 { INDEX_op_qemu_ld8u, { "r", "L" } },
2143 { INDEX_op_qemu_ld8s, { "r", "L" } },
2144 { INDEX_op_qemu_ld16u, { "r", "L" } },
2145 { INDEX_op_qemu_ld16s, { "r", "L" } },
2146 { INDEX_op_qemu_ld32, { "r", "L" } },
2147 { INDEX_op_qemu_ld64, { "r", "L" } },
2148
2149 { INDEX_op_qemu_st8, { "L", "L" } },
2150 { INDEX_op_qemu_st16, { "L", "L" } },
2151 { INDEX_op_qemu_st32, { "L", "L" } },
2152 { INDEX_op_qemu_st64, { "L", "L" } },
2153
2154 { INDEX_op_mov_i64, { "r", "r" } },
2155 { INDEX_op_movi_i64, { "r" } },
2156
2157 { INDEX_op_ld8u_i64, { "r", "r" } },
2158 { INDEX_op_ld8s_i64, { "r", "r" } },
2159 { INDEX_op_ld16u_i64, { "r", "r" } },
2160 { INDEX_op_ld16s_i64, { "r", "r" } },
2161 { INDEX_op_ld32u_i64, { "r", "r" } },
2162 { INDEX_op_ld32s_i64, { "r", "r" } },
2163 { INDEX_op_ld_i64, { "r", "r" } },
2164
2165 { INDEX_op_st8_i64, { "r", "r" } },
2166 { INDEX_op_st16_i64, { "r", "r" } },
2167 { INDEX_op_st32_i64, { "r", "r" } },
2168 { INDEX_op_st_i64, { "r", "r" } },
2169
2170 { INDEX_op_add_i64, { "r", "r", "ri" } },
2171 { INDEX_op_sub_i64, { "r", "0", "ri" } },
2172 { INDEX_op_mul_i64, { "r", "0", "rK" } },
2173
2174 { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
2175 { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
2176 { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },
2177
2178 { INDEX_op_and_i64, { "r", "0", "ri" } },
2179 { INDEX_op_or_i64, { "r", "0", "rO" } },
2180 { INDEX_op_xor_i64, { "r", "0", "rX" } },
2181
2182 { INDEX_op_neg_i64, { "r", "r" } },
2183
2184 { INDEX_op_shl_i64, { "r", "r", "Ri" } },
2185 { INDEX_op_shr_i64, { "r", "r", "Ri" } },
2186 { INDEX_op_sar_i64, { "r", "r", "Ri" } },
2187
2188 { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
2189 { INDEX_op_rotr_i64, { "r", "r", "Ri" } },
2190
2191 { INDEX_op_ext8s_i64, { "r", "r" } },
2192 { INDEX_op_ext8u_i64, { "r", "r" } },
2193 { INDEX_op_ext16s_i64, { "r", "r" } },
2194 { INDEX_op_ext16u_i64, { "r", "r" } },
2195 { INDEX_op_ext32s_i64, { "r", "r" } },
2196 { INDEX_op_ext32u_i64, { "r", "r" } },
2197
2198 { INDEX_op_bswap16_i64, { "r", "r" } },
2199 { INDEX_op_bswap32_i64, { "r", "r" } },
2200 { INDEX_op_bswap64_i64, { "r", "r" } },
2201
2202 { INDEX_op_add2_i64, { "r", "r", "0", "1", "r", "r" } },
2203 { INDEX_op_sub2_i64, { "r", "r", "0", "1", "r", "r" } },
2204
2205 { INDEX_op_brcond_i64, { "r", "rC" } },
2206 { INDEX_op_setcond_i64, { "r", "r", "rC" } },
2207 { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
2208 { INDEX_op_deposit_i64, { "r", "0", "r" } },
2209
2210 { INDEX_op_qemu_ld32u, { "r", "L" } },
2211 { INDEX_op_qemu_ld32s, { "r", "L" } },
2212
2213 { -1 },
2214 };
2215
2216 /* ??? Linux kernels provide an AUXV entry AT_HWCAP that supplies most of
2217 this information. However, getting at that entry is not easy this far
2218 away from main: we could start searching from environ, but that fails
2219 as soon as someone does a setenv in between; we could read the data
2220 from /proc/self/auxv; or we can do the probing ourselves. The only
2221 extra thing AT_HWCAP gives us is HWCAP_S390_HIGH_GPRS, which indicates
2222 that the kernel saves all 64 bits of the registers around traps while
2223 in 31-bit mode. But this is true of all "recent" kernels (we ought to
2224 dig back and see from when this might not be true). */
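/* For reference, pulling AT_HWCAP out of /proc/self/auxv would look
   roughly like the hypothetical sketch below (not used here):

       #include <elf.h>
       #include <fcntl.h>
       #include <unistd.h>

       Elf64_auxv_t av;
       unsigned long hwcap = 0;
       int fd = open("/proc/self/auxv", O_RDONLY);
       if (fd >= 0) {
           while (read(fd, &av, sizeof(av)) == sizeof(av)) {
               if (av.a_type == AT_HWCAP) {
                   hwcap = av.a_un.a_val;
                   break;
               }
           }
           close(fd);
       }
*/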
2225
2226 #include <signal.h>
2227
2228 static volatile sig_atomic_t got_sigill;
2229
2230 static void sigill_handler(int sig)
2231 {
2232 got_sigill = 1;
2233 }
2234
2235 static void query_facilities(void)
2236 {
2237 struct sigaction sa_old, sa_new;
2238 register int r0 __asm__("0");
2239 register void *r1 __asm__("1");
2240 int fail;
2241
2242 memset(&sa_new, 0, sizeof(sa_new));
2243 sa_new.sa_handler = sigill_handler;
2244 sigaction(SIGILL, &sa_new, &sa_old);
2245
2246 /* First, try STORE FACILITY LIST EXTENDED. If this is present, then
2247 we need not do any more probing. Unfortunately, this itself is an
2248 extension and the original STORE FACILITY LIST instruction is
2249 kernel-only, storing its results at absolute address 200. */
2250 /* stfle 0(%r1) */
2251 r1 = &facilities;
2252 asm volatile(".word 0xb2b0,0x1000"
2253 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
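/* Roughly speaking, STFLE takes the number of doublewords to store,
   minus one, in the low bits of %r0 (zero here, i.e. one doubleword),
   stores that many facility-indicator doublewords at the second-operand
   address, and reports back in %r0 how many the machine actually has. */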
2254
2255 if (got_sigill) {
2256 /* STORE FACILITY LIST EXTENDED is not available. Probe for one of
2257 each kind of instruction that we're interested in. */
2258 /* ??? Possibly some of these are in practice never present unless
2259 the store-facility-list-extended facility is also present. But
2260 since that isn't documented, it's just better to probe for each. */
2261
2262 /* Test for z/Architecture. Required even in 31-bit mode. */
2263 got_sigill = 0;
2264 /* agr %r0,%r0 */
2265 asm volatile(".word 0xb908,0x0000" : "=r"(r0) : : "cc");
2266 if (!got_sigill) {
2267 facilities |= FACILITY_ZARCH_ACTIVE;
2268 }
2269
2270 /* Test for long displacement. */
2271 got_sigill = 0;
2272 /* ly %r0,0(%r1) */
2273 r1 = &facilities;
2274 asm volatile(".word 0xe300,0x1000,0x0058"
2275 : "=r"(r0) : "r"(r1) : "cc");
2276 if (!got_sigill) {
2277 facilities |= FACILITY_LONG_DISP;
2278 }
2279
2280 /* Test for extended immediates. */
2281 got_sigill = 0;
2282 /* afi %r0,0 */
2283 asm volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
2284 if (!got_sigill) {
2285 facilities |= FACILITY_EXT_IMM;
2286 }
2287
2288 /* Test for general-instructions-extension. */
2289 got_sigill = 0;
2290 /* msfi %r0,1 */
2291 asm volatile(".word 0xc201,0x0000,0x0001");
2292 if (!got_sigill) {
2293 facilities |= FACILITY_GEN_INST_EXT;
2294 }
2295 }
2296
2297 sigaction(SIGILL, &sa_old, NULL);
2298
2299 /* The translator currently uses these extensions unconditionally.
2300 Pruning this back to the base ESA/390 architecture doesn't seem
2301 worthwhile, since even the KVM target requires z/Arch. */
2302 fail = 0;
2303 if ((facilities & FACILITY_ZARCH_ACTIVE) == 0) {
2304 fprintf(stderr, "TCG: z/Arch facility is required.\n");
2305 fprintf(stderr, "TCG: Boot with a 64-bit enabled kernel.\n");
2306 fail = 1;
2307 }
2308 if ((facilities & FACILITY_LONG_DISP) == 0) {
2309 fprintf(stderr, "TCG: long-displacement facility is required.\n");
2310 fail = 1;
2311 }
2312
2313 /* So far there's just enough support for 31-bit mode to let the
2314 compile succeed. This is good enough to run QEMU with KVM. */
2315 if (sizeof(void *) != 8) {
2316 fprintf(stderr, "TCG: 31-bit mode is not supported.\n");
2317 fail = 1;
2318 }
2319
2320 if (fail) {
2321 exit(-1);
2322 }
2323 }
2324
2325 static void tcg_target_init(TCGContext *s)
2326 {
2327 #if !defined(CONFIG_USER_ONLY)
2328 /* Fail safe: the softmmu TLB addressing code assumes this sizing. */
2329 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) {
2330 tcg_abort();
2331 }
2332 #endif
2333
2334 query_facilities();
2335
2336 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2337 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2338
2339 tcg_regset_clear(tcg_target_call_clobber_regs);
2340 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2341 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2342 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2343 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2344 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2345 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
2346 /* The return register can be considered call-clobbered. */
2347 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2348
2349 tcg_regset_clear(s->reserved_regs);
2350 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2351 /* XXX many insns can't be used with R0, so we'd better avoid it for now. */
2352 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2353 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2354
2355 tcg_add_target_add_op_defs(s390_op_defs);
2356 }
2357
2358 static void tcg_target_qemu_prologue(TCGContext *s)
2359 {
2360 tcg_target_long frame_size;
2361
2362 /* stmg %r6,%r15,48(%r15) (save registers) */
2363 tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
2364
2365 /* aghi %r15,-frame_size */
2366 frame_size = TCG_TARGET_CALL_STACK_OFFSET;
2367 frame_size += TCG_STATIC_CALL_ARGS_SIZE;
2368 frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
2369 tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -frame_size);
2370
2371 tcg_set_frame(s, TCG_REG_CALL_STACK,
2372 TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
2373 CPU_TEMP_BUF_NLONGS * sizeof(long));
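/* Resulting frame, low addresses first (a sketch; on s390x the ELF ABI
   reserves the bottom TCG_TARGET_CALL_STACK_OFFSET bytes as a register
   and parameter save area for our callees):
       0(%r15)                        ABI register/parameter save area
       +TCG_TARGET_CALL_STACK_OFFSET  outgoing TCG helper-call arguments
       +TCG_STATIC_CALL_ARGS_SIZE     CPU_TEMP_BUF_NLONGS longs of temps
   with the caller's frame, including the STMG save slots written above
   (reloaded from frame_size+48(%r15) in the epilogue), sitting on top. */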
2374
2375 if (GUEST_BASE >= 0x80000) {
2376 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
2377 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2378 }
2379
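/* Control arrives here from tcg_qemu_tb_exec(env, tb_ptr): per the
   s390x ABI the first two integer arguments are in %r2 and %r3, so %r2
   carries the CPUArchState pointer and %r3 the TB entry point. */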
2380 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2381 /* br %r3 (go to TB) */
2382 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
2383
2384 tb_ret_addr = s->code_ptr;
2385
2386 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2387 tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
2388 frame_size + 48);
2389
2390 /* br %r14 (return) */
2391 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
2392 }