/* tcg/s390/tcg-target.c
 * NOTE: this text was extracted from a gitweb "blame" view; the short
 * commit hashes and author initials interleaved below are blame residue,
 * not part of the program. */
2827822e
AG
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
48bb3750
RH
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
2827822e
AG
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
a01fc30d
RH
27/* We only support generating code for 64-bit mode. */
28#if TCG_TARGET_REG_BITS != 64
29#error "unsupported code generation mode"
30#endif
31
48bb3750
RH
32/* ??? The translation blocks produced by TCG are generally small enough to
33 be entirely reachable with a 16-bit displacement. Leaving the option for
34 a 32-bit displacement here Just In Case. */
35#define USE_LONG_BRANCHES 0
36
37#define TCG_CT_CONST_32 0x0100
48bb3750 38#define TCG_CT_CONST_MULI 0x0800
48bb3750
RH
39#define TCG_CT_CONST_ORI 0x2000
40#define TCG_CT_CONST_XORI 0x4000
41#define TCG_CT_CONST_CMPI 0x8000
42
43/* Several places within the instruction set 0 means "no register"
44 rather than TCG_REG_R0. */
45#define TCG_REG_NONE 0
46
47/* A scratch register that may be be used throughout the backend. */
48#define TCG_TMP0 TCG_REG_R14
49
50#ifdef CONFIG_USE_GUEST_BASE
51#define TCG_GUEST_BASE_REG TCG_REG_R13
52#else
53#define TCG_GUEST_BASE_REG TCG_REG_R0
54#endif
55
56#ifndef GUEST_BASE
57#define GUEST_BASE 0
58#endif
59
60
61/* All of the following instructions are prefixed with their instruction
62 format, and are defined as 8- or 16-bit quantities, even when the two
63 halves of the 16-bit quantity may appear 32 bits apart in the insn.
64 This makes it easy to copy the values from the tables in Appendix B. */
65typedef enum S390Opcode {
66 RIL_AFI = 0xc209,
67 RIL_AGFI = 0xc208,
3790b918 68 RIL_ALFI = 0xc20b,
48bb3750
RH
69 RIL_ALGFI = 0xc20a,
70 RIL_BRASL = 0xc005,
71 RIL_BRCL = 0xc004,
72 RIL_CFI = 0xc20d,
73 RIL_CGFI = 0xc20c,
74 RIL_CLFI = 0xc20f,
75 RIL_CLGFI = 0xc20e,
76 RIL_IIHF = 0xc008,
77 RIL_IILF = 0xc009,
78 RIL_LARL = 0xc000,
79 RIL_LGFI = 0xc001,
80 RIL_LGRL = 0xc408,
81 RIL_LLIHF = 0xc00e,
82 RIL_LLILF = 0xc00f,
83 RIL_LRL = 0xc40d,
84 RIL_MSFI = 0xc201,
85 RIL_MSGFI = 0xc200,
86 RIL_NIHF = 0xc00a,
87 RIL_NILF = 0xc00b,
88 RIL_OIHF = 0xc00c,
89 RIL_OILF = 0xc00d,
3790b918 90 RIL_SLFI = 0xc205,
0db921e6 91 RIL_SLGFI = 0xc204,
48bb3750
RH
92 RIL_XIHF = 0xc006,
93 RIL_XILF = 0xc007,
94
95 RI_AGHI = 0xa70b,
96 RI_AHI = 0xa70a,
97 RI_BRC = 0xa704,
98 RI_IIHH = 0xa500,
99 RI_IIHL = 0xa501,
100 RI_IILH = 0xa502,
101 RI_IILL = 0xa503,
102 RI_LGHI = 0xa709,
103 RI_LLIHH = 0xa50c,
104 RI_LLIHL = 0xa50d,
105 RI_LLILH = 0xa50e,
106 RI_LLILL = 0xa50f,
107 RI_MGHI = 0xa70d,
108 RI_MHI = 0xa70c,
109 RI_NIHH = 0xa504,
110 RI_NIHL = 0xa505,
111 RI_NILH = 0xa506,
112 RI_NILL = 0xa507,
113 RI_OIHH = 0xa508,
114 RI_OIHL = 0xa509,
115 RI_OILH = 0xa50a,
116 RI_OILL = 0xa50b,
117
118 RIE_CGIJ = 0xec7c,
119 RIE_CGRJ = 0xec64,
120 RIE_CIJ = 0xec7e,
121 RIE_CLGRJ = 0xec65,
122 RIE_CLIJ = 0xec7f,
123 RIE_CLGIJ = 0xec7d,
124 RIE_CLRJ = 0xec77,
125 RIE_CRJ = 0xec76,
d5690ea4 126 RIE_RISBG = 0xec55,
48bb3750
RH
127
128 RRE_AGR = 0xb908,
3790b918
RH
129 RRE_ALGR = 0xb90a,
130 RRE_ALCR = 0xb998,
131 RRE_ALCGR = 0xb988,
48bb3750
RH
132 RRE_CGR = 0xb920,
133 RRE_CLGR = 0xb921,
134 RRE_DLGR = 0xb987,
135 RRE_DLR = 0xb997,
136 RRE_DSGFR = 0xb91d,
137 RRE_DSGR = 0xb90d,
138 RRE_LGBR = 0xb906,
139 RRE_LCGR = 0xb903,
140 RRE_LGFR = 0xb914,
141 RRE_LGHR = 0xb907,
142 RRE_LGR = 0xb904,
143 RRE_LLGCR = 0xb984,
144 RRE_LLGFR = 0xb916,
145 RRE_LLGHR = 0xb985,
146 RRE_LRVR = 0xb91f,
147 RRE_LRVGR = 0xb90f,
148 RRE_LTGR = 0xb902,
36017dc6 149 RRE_MLGR = 0xb986,
48bb3750
RH
150 RRE_MSGR = 0xb90c,
151 RRE_MSR = 0xb252,
152 RRE_NGR = 0xb980,
153 RRE_OGR = 0xb981,
154 RRE_SGR = 0xb909,
3790b918
RH
155 RRE_SLGR = 0xb90b,
156 RRE_SLBR = 0xb999,
157 RRE_SLBGR = 0xb989,
48bb3750
RH
158 RRE_XGR = 0xb982,
159
96a9f093
RH
160 RRF_LOCR = 0xb9f2,
161 RRF_LOCGR = 0xb9e2,
162
48bb3750 163 RR_AR = 0x1a,
3790b918 164 RR_ALR = 0x1e,
48bb3750
RH
165 RR_BASR = 0x0d,
166 RR_BCR = 0x07,
167 RR_CLR = 0x15,
168 RR_CR = 0x19,
169 RR_DR = 0x1d,
170 RR_LCR = 0x13,
171 RR_LR = 0x18,
172 RR_LTR = 0x12,
173 RR_NR = 0x14,
174 RR_OR = 0x16,
175 RR_SR = 0x1b,
3790b918 176 RR_SLR = 0x1f,
48bb3750
RH
177 RR_XR = 0x17,
178
179 RSY_RLL = 0xeb1d,
180 RSY_RLLG = 0xeb1c,
181 RSY_SLLG = 0xeb0d,
182 RSY_SRAG = 0xeb0a,
183 RSY_SRLG = 0xeb0c,
184
185 RS_SLL = 0x89,
186 RS_SRA = 0x8a,
187 RS_SRL = 0x88,
188
189 RXY_AG = 0xe308,
190 RXY_AY = 0xe35a,
191 RXY_CG = 0xe320,
192 RXY_CY = 0xe359,
0db921e6 193 RXY_LAY = 0xe371,
48bb3750
RH
194 RXY_LB = 0xe376,
195 RXY_LG = 0xe304,
196 RXY_LGB = 0xe377,
197 RXY_LGF = 0xe314,
198 RXY_LGH = 0xe315,
199 RXY_LHY = 0xe378,
200 RXY_LLGC = 0xe390,
201 RXY_LLGF = 0xe316,
202 RXY_LLGH = 0xe391,
203 RXY_LMG = 0xeb04,
204 RXY_LRV = 0xe31e,
205 RXY_LRVG = 0xe30f,
206 RXY_LRVH = 0xe31f,
207 RXY_LY = 0xe358,
208 RXY_STCY = 0xe372,
209 RXY_STG = 0xe324,
210 RXY_STHY = 0xe370,
211 RXY_STMG = 0xeb24,
212 RXY_STRV = 0xe33e,
213 RXY_STRVG = 0xe32f,
214 RXY_STRVH = 0xe33f,
215 RXY_STY = 0xe350,
216
217 RX_A = 0x5a,
218 RX_C = 0x59,
219 RX_L = 0x58,
0db921e6 220 RX_LA = 0x41,
48bb3750
RH
221 RX_LH = 0x48,
222 RX_ST = 0x50,
223 RX_STC = 0x42,
224 RX_STH = 0x40,
225} S390Opcode;
226
227#define LD_SIGNED 0x04
228#define LD_UINT8 0x00
229#define LD_INT8 (LD_UINT8 | LD_SIGNED)
230#define LD_UINT16 0x01
231#define LD_INT16 (LD_UINT16 | LD_SIGNED)
232#define LD_UINT32 0x02
233#define LD_INT32 (LD_UINT32 | LD_SIGNED)
234#define LD_UINT64 0x03
235#define LD_INT64 (LD_UINT64 | LD_SIGNED)
236
237#ifndef NDEBUG
238static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
239 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
240 "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
241};
242#endif
243
244/* Since R6 is a potential argument register, choose it last of the
245 call-saved registers. Likewise prefer the call-clobbered registers
246 in reverse order to maximize the chance of avoiding the arguments. */
2827822e 247static const int tcg_target_reg_alloc_order[] = {
48bb3750
RH
248 TCG_REG_R13,
249 TCG_REG_R12,
250 TCG_REG_R11,
251 TCG_REG_R10,
252 TCG_REG_R9,
253 TCG_REG_R8,
254 TCG_REG_R7,
255 TCG_REG_R6,
256 TCG_REG_R14,
257 TCG_REG_R0,
258 TCG_REG_R1,
259 TCG_REG_R5,
260 TCG_REG_R4,
261 TCG_REG_R3,
262 TCG_REG_R2,
2827822e
AG
263};
264
265static const int tcg_target_call_iarg_regs[] = {
48bb3750
RH
266 TCG_REG_R2,
267 TCG_REG_R3,
268 TCG_REG_R4,
269 TCG_REG_R5,
270 TCG_REG_R6,
2827822e
AG
271};
272
273static const int tcg_target_call_oarg_regs[] = {
48bb3750 274 TCG_REG_R2,
48bb3750
RH
275};
276
277#define S390_CC_EQ 8
278#define S390_CC_LT 4
279#define S390_CC_GT 2
280#define S390_CC_OV 1
281#define S390_CC_NE (S390_CC_LT | S390_CC_GT)
282#define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
283#define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
284#define S390_CC_NEVER 0
285#define S390_CC_ALWAYS 15
286
287/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
0aed257f 288static const uint8_t tcg_cond_to_s390_cond[] = {
48bb3750
RH
289 [TCG_COND_EQ] = S390_CC_EQ,
290 [TCG_COND_NE] = S390_CC_NE,
291 [TCG_COND_LT] = S390_CC_LT,
292 [TCG_COND_LE] = S390_CC_LE,
293 [TCG_COND_GT] = S390_CC_GT,
294 [TCG_COND_GE] = S390_CC_GE,
295 [TCG_COND_LTU] = S390_CC_LT,
296 [TCG_COND_LEU] = S390_CC_LE,
297 [TCG_COND_GTU] = S390_CC_GT,
298 [TCG_COND_GEU] = S390_CC_GE,
299};
300
301/* Condition codes that result from a LOAD AND TEST. Here, we have no
302 unsigned instruction variation, however since the test is vs zero we
303 can re-map the outcomes appropriately. */
0aed257f 304static const uint8_t tcg_cond_to_ltr_cond[] = {
48bb3750
RH
305 [TCG_COND_EQ] = S390_CC_EQ,
306 [TCG_COND_NE] = S390_CC_NE,
307 [TCG_COND_LT] = S390_CC_LT,
308 [TCG_COND_LE] = S390_CC_LE,
309 [TCG_COND_GT] = S390_CC_GT,
310 [TCG_COND_GE] = S390_CC_GE,
311 [TCG_COND_LTU] = S390_CC_NEVER,
312 [TCG_COND_LEU] = S390_CC_EQ,
313 [TCG_COND_GTU] = S390_CC_NE,
314 [TCG_COND_GEU] = S390_CC_ALWAYS,
315};
316
#ifdef CONFIG_SOFTMMU
/* Softmmu slow-path helpers, indexed by log2 of the access size
   (0 = byte ... 3 = quad), judging by the ldb/ldw/ldl/ldq ordering.  */

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
#endif
48bb3750
RH
336
337static uint8_t *tb_ret_addr;
338
339/* A list of relevant facilities used by this translator. Some of these
340 are required for proper operation, and these are checked at startup. */
341
342#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
343#define FACILITY_LONG_DISP (1ULL << (63 - 18))
344#define FACILITY_EXT_IMM (1ULL << (63 - 21))
345#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
96a9f093 346#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
48bb3750
RH
347
348static uint64_t facilities;
2827822e
AG
349
/* Apply a relocation of kind TYPE at CODE_PTR, pointing it at VALUE.
   s390 pc-relative displacements are counted in halfwords, hence the
   final shift right by one.  Aborts on an unknown relocation type or
   (via assert) on a displacement that does not fit the field.  */
static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t code_ptr_tl = (intptr_t)code_ptr;
    intptr_t pcrel2;

    /* ??? Not the usual definition of "addend".  Here it is added to the
       instruction address before subtracting, not to the symbol value.  */
    pcrel2 = (value - (code_ptr_tl + addend)) >> 1;

    switch (type) {
    case R_390_PC16DBL:
        /* 16-bit halfword-scaled displacement (e.g. BRC, BRCT).  */
        assert(pcrel2 == (int16_t)pcrel2);
        *(int16_t *)code_ptr = pcrel2;
        break;
    case R_390_PC32DBL:
        /* 32-bit halfword-scaled displacement (e.g. BRASL, BRCL).  */
        assert(pcrel2 == (int32_t)pcrel2);
        *(int32_t *)code_ptr = pcrel2;
        break;
    default:
        tcg_abort();
        break;
    }
}
373
2827822e
AG
/* Parse one target-specific operand-constraint letter from *pct_str into
   CT, advancing *pct_str past the letter.  Returns 0 on success, -1 on an
   unrecognized letter.  */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':                  /* all registers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'R':                  /* not R0 */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;
    case 'L':                  /* qemu_ld/st constraint */
        /* Exclude R2-R4, which the softmmu slow path clobbers for its
           call arguments/return value.  */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
        break;
    case 'a':                  /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b':                  /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'W':                  /* force 32-bit ("word") immediate */
        ct->ct |= TCG_CT_CONST_32;
        break;
    case 'K':                  /* multiply immediate */
        ct->ct |= TCG_CT_CONST_MULI;
        break;
    case 'O':                  /* OR immediate */
        ct->ct |= TCG_CT_CONST_ORI;
        break;
    case 'X':                  /* XOR immediate */
        ct->ct |= TCG_CT_CONST_XORI;
        break;
    case 'C':                  /* compare immediate */
        ct->ct |= TCG_CT_CONST_CMPI;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
429
48bb3750
RH
/* Immediates to be used with logical OR.  This is an optimization only,
   since a full 64-bit immediate OR can always be performed with 4 sequential
   OI[LH][LH] instructions.  What we're looking for is immediates that we
   can load efficiently, and the immediate load plus the reg-reg OR is
   smaller than the sequential OI's.  */

/* Return nonzero if VAL may remain an OR-immediate under constraint CT,
   zero if it is cheaper to load VAL into a register first.  */
static int tcg_match_ori(int ct, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        if (ct & TCG_CT_CONST_32) {
            /* All 32-bit ORs can be performed with 1 48-bit insn.  */
            return 1;
        }
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0) {
        if (val == (int16_t)val) {
            /* Fits LGHI's signed 16-bit immediate.  */
            return 0;
        }
        if (facilities & FACILITY_EXT_IMM) {
            if (val == (int32_t)val) {
                /* Fits LGFI's signed 32-bit immediate.  */
                return 0;
            }
        }
    }

    return 1;
}
459
460/* Immediates to be used with logical XOR. This is almost, but not quite,
461 only an optimization. XOR with immediate is only supported with the
462 extended-immediate facility. That said, there are a few patterns for
463 which it is better to load the value into a register first. */
464
465static int tcg_match_xori(int ct, tcg_target_long val)
466{
467 if ((facilities & FACILITY_EXT_IMM) == 0) {
468 return 0;
469 }
470
471 if (ct & TCG_CT_CONST_32) {
472 /* All 32-bit XORs can be performed with 1 48-bit insn. */
473 return 1;
474 }
475
476 /* Look for negative values. These are best to load with LGHI. */
477 if (val < 0 && val == (int32_t)val) {
478 return 0;
479 }
480
481 return 1;
482}
483
/* Immediates to be used with comparisons.  Returns nonzero if VAL may be
   used as a compare-immediate under constraint CT.  */

static int tcg_match_cmpi(int ct, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* The COMPARE IMMEDIATE instruction is available.  */
        if (ct & TCG_CT_CONST_32) {
            /* We have a 32-bit immediate and can compare against anything.  */
            return 1;
        } else {
            /* ??? We have no insight here into whether the comparison is
               signed or unsigned.  The COMPARE IMMEDIATE insn uses a 32-bit
               signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
               a 32-bit unsigned immediate.  If we were to use the (semi)
               obvious "val == (int32_t)val" we would be enabling unsigned
               comparisons vs very large numbers.  The only solution is to
               take the intersection of the ranges.  */
            /* ??? Another possible solution is to simply lie and allow all
               constants here and force the out-of-range values into a temp
               register in tgen_cmp when we have knowledge of the actual
               comparison code in use.  */
            return val >= 0 && val <= 0x7fffffff;
        }
    } else {
        /* Only the LOAD AND TEST instruction is available.  */
        return val == 0;
    }
}
512
/* Test if a constant matches the constraint.  Returns nonzero when VAL is
   acceptable for the operand described by ARG_CT.  */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        /* Any constant is acceptable.  */
        return 1;
    }

    /* Handle the modifiers.  */
    if (ct & TCG_CT_CONST_32) {
        /* Operand is a 32-bit "word": only its low 32 bits matter.  */
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_MULI) {
        /* Immediates that may be used with multiply.  If we have the
           general-instruction-extensions, then we have MULTIPLY SINGLE
           IMMEDIATE with a signed 32-bit, otherwise we have only
           MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit.  */
        if (facilities & FACILITY_GEN_INST_EXT) {
            return val == (int32_t)val;
        } else {
            return val == (int16_t)val;
        }
    } else if (ct & TCG_CT_CONST_ORI) {
        return tcg_match_ori(ct, val);
    } else if (ct & TCG_CT_CONST_XORI) {
        return tcg_match_xori(ct, val);
    } else if (ct & TCG_CT_CONST_CMPI) {
        return tcg_match_cmpi(ct, val);
    }

    return 0;
}
549
48bb3750
RH
550/* Emit instructions according to the given instruction format. */
551
/* RR format (2 bytes): op(8) r1(4) r2(4).  */
static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}
556
/* RRE format (4 bytes): op(16) unused(8) r1(4) r2(4).  */
static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}
562
96a9f093
RH
/* RRF format (4 bytes): op(16) m3(4) unused(4) r1(4) r2(4); m3 here is
   the condition-code mask for LOAD ON CONDITION.  */
static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}
568
48bb3750
RH
/* RI format (4 bytes): op(8) r1(4) op'(4) i2(16).  The second opcode
   nibble lives in the low byte of OP; r1 is OR'd into the gap between
   the two opcode parts.  */
static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}
573
/* RIL format (6 bytes): op(8) r1(4) op'(4) i2(32).  As with RI, the
   second opcode nibble is in the low byte of OP.  */
static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}
579
/* RS format (4 bytes): op(8) r1(4) r3(4) b2(4) d2(12).  The displacement
   is an unsigned 12-bit value.  */
static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}
586
/* RSY format (6 bytes): op(8) r1(4) r3(4) b2(4) dl2(12) dh2(8) op'(8).
   The signed 20-bit displacement is split into a low 12-bit part (DL)
   and a high 8-bit part (DH), hence the bit shuffling below.  */
static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}
594
595#define tcg_out_insn_RX tcg_out_insn_RS
596#define tcg_out_insn_RXY tcg_out_insn_RSY
597
598/* Emit an opcode with "type-checking" of the format. */
599#define tcg_out_insn(S, FMT, OP, ...) \
600 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
601
602
/* emit 64-bit shifts: shift SRC by SH_REG + SH_IMM into DEST.  Note the
   RSY operand order: r3 is the source, the "b2" slot carries the shift
   register.  */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}
609
/* emit 32-bit shifts: shift DEST in place by SH_REG + SH_IMM.  The RS
   shift forms have no separate source operand.  */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}
616
617static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
618{
619 if (src != dst) {
620 if (type == TCG_TYPE_I32) {
621 tcg_out_insn(s, RR, LR, dst, src);
622 } else {
623 tcg_out_insn(s, RRE, LGR, dst, src);
624 }
625 }
626}
627
/* load a register with an immediate value */
/* Tries progressively larger/slower encodings: LGHI, the four LLI*
   halfword loads, 48-bit extended immediates, a pc-relative LARL, and
   finally a multi-insn sequence building low then high halves.  */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    /* Indexed by halfword position: LLILL/LLILH/LLIHL/LLIHH each load
       one 16-bit halfword and zero the rest.  */
    static const S390Opcode lli_insns[4] = {
        RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
    };

    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        /* Only the low 32 bits matter; normalize both views.  */
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            /* All set bits fall within one halfword.  */
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            /* ">> 31 >> 1" rather than ">> 32": written as two shifts,
               presumably to sidestep shift-by-width issues on 32-bit
               hosts -- TODO confirm, this file is 64-bit only.  */
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
            return;
        }
    }

    /* Try for PC-relative address load.  */
    if ((sval & 1) == 0) {
        intptr_t off = (sval - (intptr_t)s->code_ptr) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    }

    /* If extended immediates are not present, then we may have to issue
       several instructions to load the low 32 bits.  */
    if (!(facilities & FACILITY_EXT_IMM)) {
        /* A 32-bit unsigned value can be loaded in 2 insns.  And given
           that the lli_insns loop above did not succeed, we know that
           both insns are required.  */
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RI, LLILL, ret, uval);
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }

        /* If all high bits are set, the value can be loaded in 2 or 3 insns.
           We first want to make sure that all the high bits get set.  With
           luck the low 16-bits can be considered negative to perform that for
           free, otherwise we load an explicit -1.  */
        if (sval >> 31 >> 1 == -1) {
            if (uval & 0x8000) {
                tcg_out_insn(s, RI, LGHI, ret, uval);
            } else {
                tcg_out_insn(s, RI, LGHI, ret, -1);
                tcg_out_insn(s, RI, IILL, ret, uval);
            }
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }
    }

    /* If we get here, both the high and low parts have non-zero bits.  */

    /* Recurse to load the lower 32-bits.  */
    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);

    /* Insert data into the high 32-bits.  */
    uval = uval >> 31 >> 1;
    if (facilities & FACILITY_EXT_IMM) {
        if (uval < 0x10000) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        } else if ((uval & 0xffff) == 0) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        } else {
            tcg_out_insn(s, RIL, IIHF, ret, uval);
        }
    } else {
        /* Insert each non-zero halfword individually.  */
        if (uval & 0xffff) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        }
        if (uval & 0xffff0000) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        }
    }
}
735
736
/* Emit a load/store type instruction.  Inputs are:
   DATA: The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY: The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        /* The xor/sub trick sign-extends the low 20 bits of OFS.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    /* Prefer the shorter RX form when it exists and the offset fits its
       unsigned 12-bit displacement.  */
    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
767
48bb3750 768
2827822e 769/* load data without address translation or endianness conversion */
48bb3750 770static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 771 TCGReg base, intptr_t ofs)
2827822e 772{
48bb3750
RH
773 if (type == TCG_TYPE_I32) {
774 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
775 } else {
776 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
777 }
2827822e
AG
778}
779
48bb3750 780static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 781 TCGReg base, intptr_t ofs)
2827822e 782{
48bb3750
RH
783 if (type == TCG_TYPE_I32) {
784 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
785 } else {
786 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
787 }
788}
789
/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    tcg_target_long addr = (tcg_target_long)abs;

    if (facilities & FACILITY_GEN_INST_EXT) {
        /* Try a pc-relative load (LRL/LGRL).  The displacement is in
           halfwords; NOTE(review): the shift drops bit 0, so this
           assumes ABS is at least 2-byte aligned -- confirm.  */
        tcg_target_long disp = (addr - (tcg_target_long)s->code_ptr) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }

    /* Fallback: materialize the address minus its low 16 bits in DEST,
       then load with those low bits as the displacement.  */
    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}
810
f0bffc27
RH
/* Emit ROTATE THEN INSERT SELECTED BITS: rotate SRC left by OFS, insert
   bits MSB..LSB into DEST; Z nonzero zeroes the remaining bits.  */
static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f: op(8) r1(4) r2(4) i3(8) i4(8) i5(8) op'(8).
       The Z flag is the high bit of the i4 (lsb) byte.  */
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}
819
48bb3750
RH
/* Sign-extend the low byte of SRC into DEST.  */
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* LOAD BYTE does it in one insn.  */
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    /* Fallback: shift the byte to the top, then arithmetic-shift back.  */
    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            /* SLLG has a separate source operand, saving a move.  */
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}
839
840static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
841{
842 if (facilities & FACILITY_EXT_IMM) {
843 tcg_out_insn(s, RRE, LLGCR, dest, src);
844 return;
845 }
846
847 if (dest == src) {
848 tcg_out_movi(s, type, TCG_TMP0, 0xff);
849 src = TCG_TMP0;
850 } else {
851 tcg_out_movi(s, type, dest, 0xff);
852 }
853 if (type == TCG_TYPE_I32) {
854 tcg_out_insn(s, RR, NR, dest, src);
855 } else {
856 tcg_out_insn(s, RRE, NGR, dest, src);
857 }
858}
859
/* Sign-extend the low halfword of SRC into DEST.  */
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* LOAD HALFWORD does it in one insn.  */
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    /* Fallback: shift to the top, arithmetic-shift back.  */
    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            /* SLLG has a separate source operand, saving a move.  */
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}
879
/* Zero-extend the low halfword of SRC into DEST.  */
static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* LOAD LOGICAL HALFWORD does it in one insn.  */
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    /* Otherwise AND with 0xffff; use the scratch for the mask when DEST
       overlaps SRC so SRC isn't clobbered first.  */
    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}
899
/* Sign-extend the low 32 bits of SRC into DEST.  */
static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}
904
/* Zero-extend the low 32 bits of SRC into DEST.  */
static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}
909
f0bffc27
RH
910/* Accept bit patterns like these:
911 0....01....1
912 1....10....0
913 1..10..01..1
914 0..01..10..0
915 Copied from gcc sources. */
916static inline bool risbg_mask(uint64_t c)
917{
918 uint64_t lsb;
919 /* We don't change the number of transitions by inverting,
920 so make sure we start with the LSB zero. */
921 if (c & 1) {
922 c = ~c;
923 }
924 /* Reject all zeros or all ones. */
925 if (c == 0) {
926 return false;
927 }
928 /* Find the first transition. */
929 lsb = c & -c;
930 /* Invert to look for a second transition. */
931 c = ~c;
932 /* Erase the first transition. */
933 c &= -lsb;
934 /* Find the second transition, if any. */
935 lsb = c & -c;
936 /* Match if all the bits are 1's, or if c is zero. */
937 return c == -lsb;
938}
939
/* AND the constant VAL into DEST in place, choosing the cheapest
   encoding: zero-extensions, 16/32-bit immediate ANDs, RISBG for
   contiguous masks, or a full constant load plus register AND.  */
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    /* AND IMMEDIATE on each 16-bit halfword.  */
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    /* AND IMMEDIATE on each 32-bit half (extended-immediate only).  */
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    /* Bits of DEST that are significant for this operation; for 32-bit
       ops the high 32 bits are don't-cares.  */
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }
    if (facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.
       Usable when all bits outside one halfword are 1 (or don't-care).  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }
    /* A contiguous (possibly wrapping) run of 1's can be done with one
       RISBG that zeroes everything outside MSB..LSB.  */
    if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        int msb, lsb;
        if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
            /* Achieve wraparound by swapping msb and lsb.  */
            msb = 63 - ctz64(~val);
            lsb = clz64(~val) + 1;
        } else {
            msb = clz64(val);
            lsb = 63 - ctz64(val);
        }
        tcg_out_risbg(s, dest, dest, msb, lsb, 0, 1);
        return;
    }

    /* Fall back to loading the constant.  */
    tcg_out_movi(s, type, TCG_TMP0, val);
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}
1008
1009static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1010{
1011 static const S390Opcode oi_insns[4] = {
1012 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1013 };
1014 static const S390Opcode nif_insns[2] = {
1015 RIL_OILF, RIL_OIHF
1016 };
1017
1018 int i;
1019
1020 /* Look for no-op. */
1021 if (val == 0) {
1022 return;
1023 }
1024
1025 if (facilities & FACILITY_EXT_IMM) {
1026 /* Try all 32-bit insns that can perform it in one go. */
1027 for (i = 0; i < 4; i++) {
1028 tcg_target_ulong mask = (0xffffull << i*16);
1029 if ((val & mask) != 0 && (val & ~mask) == 0) {
1030 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1031 return;
1032 }
1033 }
1034
1035 /* Try all 48-bit insns that can perform it in one go. */
1036 for (i = 0; i < 2; i++) {
1037 tcg_target_ulong mask = (0xffffffffull << i*32);
1038 if ((val & mask) != 0 && (val & ~mask) == 0) {
1039 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1040 return;
1041 }
1042 }
1043
1044 /* Perform the OR via sequential modifications to the high and
1045 low parts. Do this via recursion to handle 16-bit vs 32-bit
1046 masks in each half. */
1047 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1048 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1049 } else {
1050 /* With no extended-immediate facility, we don't need to be so
1051 clever. Just iterate over the insns and mask in the constant. */
1052 for (i = 0; i < 4; i++) {
1053 tcg_target_ulong mask = (0xffffull << i*16);
1054 if ((val & mask) != 0) {
1055 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1056 }
1057 }
1058 }
1059}
1060
1061static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1062{
1063 /* Perform the xor by parts. */
1064 if (val & 0xffffffff) {
1065 tcg_out_insn(s, RIL, XILF, dest, val);
1066 }
1067 if (val > 0xffffffff) {
1068 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1069 }
1070}
1071
/* Emit a comparison of R1 against C2 (a register, or a constant when
   C2CONST is set).  Returns the condition-code mask the caller should
   branch or load on.  A compare against constant 0 uses LOAD AND TEST,
   whose outcomes need the remapped tcg_cond_to_ltr_cond table.  */
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, int c2const)
{
    bool is_unsigned = is_unsigned_cond(c);
    if (c2const) {
        if (c2 == 0) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LTR, r1, r1);
            } else {
                tcg_out_insn(s, RRE, LTGR, r1, r1);
            }
            return tcg_cond_to_ltr_cond[c];
        } else {
            /* COMPARE (LOGICAL) IMMEDIATE; tcg_match_cmpi guarantees the
               constant fits both signed and unsigned 32-bit forms.  */
            if (is_unsigned) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
                }
            } else {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
                }
            }
        }
    } else {
        /* Register-register compare, logical or signed as required.  */
        if (is_unsigned) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CLR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CLGR, r1, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CGR, r1, c2);
            }
        }
    }
    return tcg_cond_to_s390_cond[c];
}
1116
1117static void tgen_setcond(TCGContext *s, TCGType type, TCGCond c,
96a9f093 1118 TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
48bb3750 1119{
96a9f093 1120 int cc = tgen_cmp(s, type, c, c1, c2, c2const);
48bb3750
RH
1121
1122 /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over: */
1123 tcg_out_movi(s, type, dest, 1);
1124 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1125 tcg_out_movi(s, type, dest, 0);
1126}
1127
96a9f093
RH
1128static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1129 TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
1130{
1131 int cc;
1132 if (facilities & FACILITY_LOAD_ON_COND) {
1133 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1134 tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
1135 } else {
1136 c = tcg_invert_cond(c);
1137 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1138
1139 /* Emit: if (cc) goto over; dest = r3; over: */
1140 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1141 tcg_out_insn(s, RRE, LGR, dest, r3);
1142 }
1143}
1144
d5690ea4
RH
1145bool tcg_target_deposit_valid(int ofs, int len)
1146{
1147 return (facilities & FACILITY_GEN_INST_EXT) != 0;
1148}
1149
1150static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1151 int ofs, int len)
1152{
1153 int lsb = (63 - ofs);
1154 int msb = lsb - (len - 1);
f0bffc27 1155 tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
d5690ea4
RH
1156}
1157
48bb3750
RH
1158static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
1159{
1160 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1161 if (off > -0x8000 && off < 0x7fff) {
1162 tcg_out_insn(s, RI, BRC, cc, off);
1163 } else if (off == (int32_t)off) {
1164 tcg_out_insn(s, RIL, BRCL, cc, off);
1165 } else {
1166 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1167 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1168 }
1169}
1170
1171static void tgen_branch(TCGContext *s, int cc, int labelno)
1172{
1173 TCGLabel* l = &s->labels[labelno];
1174 if (l->has_value) {
1175 tgen_gotoi(s, cc, l->u.value);
1176 } else if (USE_LONG_BRANCHES) {
1177 tcg_out16(s, RIL_BRCL | (cc << 4));
1178 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2);
1179 s->code_ptr += 4;
1180 } else {
1181 tcg_out16(s, RI_BRC | (cc << 4));
1182 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2);
1183 s->code_ptr += 2;
1184 }
1185}
1186
/* Emit a COMPARE AND BRANCH RELATIVE (RIE-b format) insn OPC comparing
   registers R1 and R2 and branching to LABELNO when the condition-code
   mask CC matches.  */
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    tcg_target_long off;

    if (l->has_value) {
        /* Label already bound: halfword-scaled backward displacement.  */
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = ((int16_t *)s->code_ptr)[1];
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
    }

    /* RIE-b layout: opcode-high/r1/r2, 16-bit displacement,
       cc-mask and opcode-low.  */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}
1205
/* Emit a COMPARE IMMEDIATE AND BRANCH RELATIVE (RIE-c format) insn OPC
   comparing register R1 with the 8-bit immediate I2 and branching to
   LABELNO when the condition-code mask CC matches.  */
static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    tcg_target_long off;

    if (l->has_value) {
        /* Label already bound: halfword-scaled backward displacement.  */
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = ((int16_t *)s->code_ptr)[1];
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
    }

    /* RIE-c layout: opcode-high/r1/cc-mask, 16-bit displacement,
       8-bit immediate and opcode-low.  */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}
1224
1225static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1226 TCGReg r1, TCGArg c2, int c2const, int labelno)
1227{
1228 int cc;
1229
1230 if (facilities & FACILITY_GEN_INST_EXT) {
b879f308 1231 bool is_unsigned = is_unsigned_cond(c);
48bb3750
RH
1232 bool in_range;
1233 S390Opcode opc;
1234
1235 cc = tcg_cond_to_s390_cond[c];
1236
1237 if (!c2const) {
1238 opc = (type == TCG_TYPE_I32
1239 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1240 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
1241 tgen_compare_branch(s, opc, cc, r1, c2, labelno);
1242 return;
1243 }
1244
1245 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1246 If the immediate we've been given does not fit that range, we'll
1247 fall back to separate compare and branch instructions using the
1248 larger comparison range afforded by COMPARE IMMEDIATE. */
1249 if (type == TCG_TYPE_I32) {
1250 if (is_unsigned) {
1251 opc = RIE_CLIJ;
1252 in_range = (uint32_t)c2 == (uint8_t)c2;
1253 } else {
1254 opc = RIE_CIJ;
1255 in_range = (int32_t)c2 == (int8_t)c2;
1256 }
1257 } else {
1258 if (is_unsigned) {
1259 opc = RIE_CLGIJ;
1260 in_range = (uint64_t)c2 == (uint8_t)c2;
1261 } else {
1262 opc = RIE_CGIJ;
1263 in_range = (int64_t)c2 == (int8_t)c2;
1264 }
1265 }
1266 if (in_range) {
1267 tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
1268 return;
1269 }
1270 }
1271
1272 cc = tgen_cmp(s, type, c, r1, c2, c2const);
1273 tgen_branch(s, cc, labelno);
1274}
1275
1276static void tgen_calli(TCGContext *s, tcg_target_long dest)
1277{
1278 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1279 if (off == (int32_t)off) {
1280 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1281 } else {
1282 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1283 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1284 }
1285}
1286
1287static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data,
1288 TCGReg base, TCGReg index, int disp)
1289{
1290#ifdef TARGET_WORDS_BIGENDIAN
1291 const int bswap = 0;
1292#else
1293 const int bswap = 1;
1294#endif
1295 switch (opc) {
1296 case LD_UINT8:
1297 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1298 break;
1299 case LD_INT8:
1300 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1301 break;
1302 case LD_UINT16:
1303 if (bswap) {
1304 /* swapped unsigned halfword load with upper bits zeroed */
1305 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1306 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1307 } else {
1308 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1309 }
1310 break;
1311 case LD_INT16:
1312 if (bswap) {
1313 /* swapped sign-extended halfword load */
1314 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1315 tgen_ext16s(s, TCG_TYPE_I64, data, data);
1316 } else {
1317 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1318 }
1319 break;
1320 case LD_UINT32:
1321 if (bswap) {
1322 /* swapped unsigned int load with upper bits zeroed */
1323 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1324 tgen_ext32u(s, data, data);
1325 } else {
1326 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1327 }
1328 break;
1329 case LD_INT32:
1330 if (bswap) {
1331 /* swapped sign-extended int load */
1332 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1333 tgen_ext32s(s, data, data);
1334 } else {
1335 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1336 }
1337 break;
1338 case LD_UINT64:
1339 if (bswap) {
1340 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
1341 } else {
1342 tcg_out_insn(s, RXY, LG, data, base, index, disp);
1343 }
1344 break;
1345 default:
1346 tcg_abort();
1347 }
1348}
1349
1350static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data,
1351 TCGReg base, TCGReg index, int disp)
1352{
1353#ifdef TARGET_WORDS_BIGENDIAN
1354 const int bswap = 0;
1355#else
1356 const int bswap = 1;
1357#endif
1358 switch (opc) {
1359 case LD_UINT8:
1360 if (disp >= 0 && disp < 0x1000) {
1361 tcg_out_insn(s, RX, STC, data, base, index, disp);
1362 } else {
1363 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1364 }
1365 break;
1366 case LD_UINT16:
1367 if (bswap) {
1368 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1369 } else if (disp >= 0 && disp < 0x1000) {
1370 tcg_out_insn(s, RX, STH, data, base, index, disp);
1371 } else {
1372 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1373 }
1374 break;
1375 case LD_UINT32:
1376 if (bswap) {
1377 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1378 } else if (disp >= 0 && disp < 0x1000) {
1379 tcg_out_insn(s, RX, ST, data, base, index, disp);
1380 } else {
1381 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1382 }
1383 break;
1384 case LD_UINT64:
1385 if (bswap) {
1386 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1387 } else {
1388 tcg_out_insn(s, RXY, STG, data, base, index, disp);
1389 }
1390 break;
1391 default:
1392 tcg_abort();
1393 }
1394}
1395
1396#if defined(CONFIG_SOFTMMU)
65a62a75
RH
/* Emit the softmmu TLB check for a qemu load/store.  On a TLB hit,
   execution falls through with the host address (guest address plus the
   TLB addend) in the returned register.  On a miss the ld/st helper is
   called, after which control jumps to the location recorded in
   *LABEL2_PTR_P (patched later by tcg_finish_qemu_ldst).  */
static TCGReg tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
                                    TCGReg addr_reg, int mem_index, int opc,
                                    uint16_t **label2_ptr_p, int is_store)
{
    const TCGReg arg0 = tcg_target_call_iarg_regs[0];
    const TCGReg arg1 = tcg_target_call_iarg_regs[1];
    const TCGReg arg2 = tcg_target_call_iarg_regs[2];
    const TCGReg arg3 = tcg_target_call_iarg_regs[3];
    int s_bits = opc & 3;       /* log2 of the access size */
    uint16_t *label1_ptr;
    tcg_target_long ofs;

    /* Copy the guest address into arg1, zero-extended for 32-bit guests.  */
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, arg1, addr_reg);
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
    }

    /* arg2 = page number, scaled so it can index the TLB table.  */
    tcg_out_sh64(s, RSY_SRLG, arg2, addr_reg, TCG_REG_NONE,
                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* Mask arg1 down to page + alignment bits, arg2 to the TLB slot offset.  */
    tgen_andi(s, TCG_TYPE_I64, arg1, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tgen_andi(s, TCG_TYPE_I64, arg2, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* Pick the comparator field matching the access direction.  */
    if (is_store) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    }
    assert(ofs < 0x80000);

    /* Compare the masked address against the TLB entry's tag.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, arg1, arg2, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, arg1, arg2, TCG_AREG0, ofs);
    }

    /* Reload the unmasked guest address into arg1; it is both the
       slow-path helper argument and the base of the fast-path access.  */
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, arg1, addr_reg);
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
    }

    label1_ptr = (uint16_t*)s->code_ptr;

    /* je label1 (offset will be patched in later) */
    tcg_out_insn(s, RI, BRC, S390_CC_EQ, 0);

    /* call load/store helper */
    if (is_store) {
        /* Make sure to zero-extend the value to the full register
           for the calling convention. */
        switch (opc) {
        case LD_UINT8:
            tgen_ext8u(s, TCG_TYPE_I64, arg2, data_reg);
            break;
        case LD_UINT16:
            tgen_ext16u(s, TCG_TYPE_I64, arg2, data_reg);
            break;
        case LD_UINT32:
            tgen_ext32u(s, arg2, data_reg);
            break;
        case LD_UINT64:
            tcg_out_mov(s, TCG_TYPE_I64, arg2, data_reg);
            break;
        default:
            tcg_abort();
        }
        tcg_out_movi(s, TCG_TYPE_I32, arg3, mem_index);
        tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
        tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
        tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
        tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);

        /* sign extension */
        switch (opc) {
        case LD_INT8:
            tgen_ext8s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
            break;
        case LD_INT16:
            tgen_ext16s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
            break;
        case LD_INT32:
            tgen_ext32s(s, data_reg, TCG_REG_R2);
            break;
        default:
            /* unsigned -> just copy */
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
            break;
        }
    }

    /* jump to label2 (end) */
    *label2_ptr_p = (uint16_t*)s->code_ptr;

    tcg_out_insn(s, RI, BRC, S390_CC_ALWAYS, 0);

    /* this is label1, patch branch */
    *(label1_ptr + 1) = ((unsigned long)s->code_ptr -
                         (unsigned long)label1_ptr) >> 1;

    /* Fast path: add the TLB addend to form the host address.  */
    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    assert(ofs < 0x80000);

    tcg_out_mem(s, 0, RXY_AG, arg1, arg2, TCG_AREG0, ofs);

    return arg1;
}
1507
1508static void tcg_finish_qemu_ldst(TCGContext* s, uint16_t *label2_ptr)
1509{
1510 /* patch branch */
1511 *(label2_ptr + 1) = ((unsigned long)s->code_ptr -
1512 (unsigned long)label2_ptr) >> 1;
1513}
1514#else
1515static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1516 TCGReg *index_reg, tcg_target_long *disp)
1517{
1518 if (TARGET_LONG_BITS == 32) {
1519 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1520 *addr_reg = TCG_TMP0;
1521 }
1522 if (GUEST_BASE < 0x80000) {
1523 *index_reg = TCG_REG_NONE;
1524 *disp = GUEST_BASE;
1525 } else {
1526 *index_reg = TCG_GUEST_BASE_REG;
1527 *disp = 0;
1528 }
1529}
1530#endif /* CONFIG_SOFTMMU */
1531
1532/* load data with address translation (if applicable)
1533 and endianness conversion */
1534static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
1535{
1536 TCGReg addr_reg, data_reg;
1537#if defined(CONFIG_SOFTMMU)
1538 int mem_index;
1539 uint16_t *label2_ptr;
1540#else
1541 TCGReg index_reg;
1542 tcg_target_long disp;
1543#endif
1544
1545 data_reg = *args++;
1546 addr_reg = *args++;
1547
1548#if defined(CONFIG_SOFTMMU)
1549 mem_index = *args;
1550
65a62a75
RH
1551 addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1552 opc, &label2_ptr, 0);
48bb3750 1553
65a62a75 1554 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);
48bb3750
RH
1555
1556 tcg_finish_qemu_ldst(s, label2_ptr);
1557#else
1558 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1559 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1560#endif
1561}
1562
1563static void tcg_out_qemu_st(TCGContext* s, const TCGArg* args, int opc)
1564{
1565 TCGReg addr_reg, data_reg;
1566#if defined(CONFIG_SOFTMMU)
1567 int mem_index;
1568 uint16_t *label2_ptr;
1569#else
1570 TCGReg index_reg;
1571 tcg_target_long disp;
1572#endif
1573
1574 data_reg = *args++;
1575 addr_reg = *args++;
1576
1577#if defined(CONFIG_SOFTMMU)
1578 mem_index = *args;
1579
65a62a75
RH
1580 addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1581 opc, &label2_ptr, 1);
48bb3750 1582
65a62a75 1583 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);
48bb3750
RH
1584
1585 tcg_finish_qemu_ldst(s, label2_ptr);
1586#else
1587 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1588 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1589#endif
2827822e
AG
1590}
1591
48bb3750
RH
/* Expand to the pair of case labels for the _i32 and _i64 variants of
   opcode X, for opcodes that are handled identically at both widths.  */
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
48bb3750 1595
/* Emit host code for a single TCG opcode OPC.  ARGS holds the operands
   and const_args flags which of them are constants rather than
   registers (as permitted by the matching constraint in s390_op_defs).  */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    S390Opcode op;
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* return value */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
        tgen_gotoi(s, S390_CC_ALWAYS, (unsigned long)tb_ret_addr);
        break;

    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            tcg_abort();
        } else {
            /* load address stored at s->tb_next + args[0] */
            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
            /* and go there */
            tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tgen_calli(s, args[0]);
        } else {
            tcg_out_insn(s, RR, BASR, TCG_REG_R14, args[0]);
        }
        break;

    case INDEX_op_mov_i32:
        tcg_out_mov(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;

    OP_32_64(ld8u):
        /* ??? LLC (RXY format) is only present with the extended-immediate
           facility, whereas LLGC is always present.  */
        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld8s):
        /* ??? LB is no smaller than LGB, so no point to using it.  */
        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld16u):
        /* ??? LLH (RXY format) is only present with the extended-immediate
           facility, whereas LLGH is always present.  */
        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld16s_i32:
        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    OP_32_64(st16):
        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
        do_addi_32:
            /* In-place add: prefer the immediate-add insns.  */
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AHI, a0, a2);
                    break;
                }
                if (facilities & FACILITY_EXT_IMM) {
                    tcg_out_insn(s, RIL, AFI, a0, a2);
                    break;
                }
            }
            /* Otherwise LOAD ADDRESS computes a1 + a2 into a0.  */
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, AR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
            /* Subtracting a constant is adding its negation.  */
            a2 = -a2;
            goto do_addi_32;
        }
        tcg_out_insn(s, RR, SR, args[0], args[2]);
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
        } else {
            tcg_out_insn(s, RR, NR, args[0], args[2]);
        }
        break;
    case INDEX_op_or_i32:
        if (const_args[2]) {
            tgen64_ori(s, args[0], args[2] & 0xffffffff);
        } else {
            tcg_out_insn(s, RR, OR, args[0], args[2]);
        }
        break;
    case INDEX_op_xor_i32:
        if (const_args[2]) {
            tgen64_xori(s, args[0], args[2] & 0xffffffff);
        } else {
            tcg_out_insn(s, RR, XR, args[0], args[2]);
        }
        break;

    case INDEX_op_neg_i32:
        tcg_out_insn(s, RR, LCR, args[0], args[1]);
        break;

    case INDEX_op_mul_i32:
        if (const_args[2]) {
            if ((int32_t)args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i32:
        tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
        break;

    case INDEX_op_shl_i32:
        op = RS_SLL;
    do_shift32:
        if (const_args[2]) {
            tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh32(s, op, args[0], args[2], 0);
        }
        break;
    case INDEX_op_shr_i32:
        op = RS_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        op = RS_SRA;
        goto do_shift32;

    case INDEX_op_rotl_i32:
        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol.  */
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i32:
        /* Rotate right is rotate left by the complementary count.  */
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
                         TCG_REG_NONE, (32 - args[2]) & 31);
        } else {
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i32:
        tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
        tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
        break;

    OP_32_64(bswap16):
        /* The TCG bswap definition requires bits 0-47 already be zero.
           Thus we don't need the G-type insns to implement bswap16_i64.  */
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
        break;
    OP_32_64(bswap32):
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        break;

    case INDEX_op_add2_i32:
        /* ??? Make use of ALFI.  */
        tcg_out_insn(s, RR, ALR, args[0], args[4]);
        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i32:
        /* ??? Make use of SLFI.  */
        tcg_out_insn(s, RR, SLR, args[0], args[4]);
        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
        break;

    case INDEX_op_br:
        tgen_branch(s, S390_CC_ALWAYS, args[0]);
        break;

    case INDEX_op_brcond_i32:
        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
                    args[1], const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i32:
        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
                     args[2], const_args[2], args[3]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, LD_UINT8);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, LD_INT8);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, LD_UINT16);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, LD_INT16);
        break;
    case INDEX_op_qemu_ld32:
        /* ??? Technically we can use a non-extending instruction.  */
        tcg_out_qemu_ld(s, args, LD_UINT32);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, LD_UINT64);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, LD_UINT8);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, LD_UINT16);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, LD_UINT32);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, LD_UINT64);
        break;

    case INDEX_op_mov_i64:
        tcg_out_mov(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;

    case INDEX_op_ld16s_i64:
        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_st32_i64:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            /* In-place add: pick the narrowest immediate insn that fits.  */
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AGHI, a0, a2);
                    break;
                }
                if (facilities & FACILITY_EXT_IMM) {
                    if (a2 == (int32_t)a2) {
                        tcg_out_insn(s, RIL, AGFI, a0, a2);
                        break;
                    } else if (a2 == (uint32_t)a2) {
                        tcg_out_insn(s, RIL, ALGFI, a0, a2);
                        break;
                    } else if (-a2 == (uint32_t)-a2) {
                        tcg_out_insn(s, RIL, SLGFI, a0, -a2);
                        break;
                    }
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, AGR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            /* Subtracting a constant is adding its negation.  */
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out_insn(s, RRE, SGR, args[0], args[2]);
        }
        break;

    case INDEX_op_and_i64:
        if (const_args[2]) {
            tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, NGR, args[0], args[2]);
        }
        break;
    case INDEX_op_or_i64:
        if (const_args[2]) {
            tgen64_ori(s, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, OGR, args[0], args[2]);
        }
        break;
    case INDEX_op_xor_i64:
        if (const_args[2]) {
            tgen64_xori(s, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, XGR, args[0], args[2]);
        }
        break;

    case INDEX_op_neg_i64:
        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
        break;

    case INDEX_op_mul_i64:
        if (const_args[2]) {
            if (args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MGHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i64:
        /* ??? We get an unnecessary sign-extension of the dividend
           into R3 with this definition, but as we do in fact always
           produce both quotient and remainder using INDEX_op_div_i64
           instead requires jumping through even more hoops.  */
        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_mulu2_i64:
        tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
        break;

    case INDEX_op_shl_i64:
        op = RSY_SLLG;
    do_shift64:
        if (const_args[2]) {
            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_shr_i64:
        op = RSY_SRLG;
        goto do_shift64;
    case INDEX_op_sar_i64:
        op = RSY_SRAG;
        goto do_shift64;

    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, (64 - args[2]) & 63);
        } else {
            /* We can use the smaller 32-bit negate because only the
               low 6 bits are examined for the rotate.  */
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i64:
        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tgen_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i64:
        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i64:
        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext32u_i64:
        tgen_ext32u(s, args[0], args[1]);
        break;

    case INDEX_op_add2_i64:
        /* ??? Make use of ALGFI and SLGFI.  */
        tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i64:
        /* ??? Make use of ALGFI and SLGFI.  */
        tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
        break;

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
                     args[2], const_args[2], args[3]);
        break;

    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, args, LD_UINT32);
        break;
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, LD_INT32);
        break;

    OP_32_64(deposit):
        tgen_deposit(s, args[0], args[2], args[3], args[4]);
        break;

    default:
        fprintf(stderr,"unimplemented opc 0x%x\n",opc);
        tcg_abort();
    }
}
2084
48bb3750
RH
/* Operand constraint table for every TCG opcode this backend accepts.
   The constraint letters are decoded by the backend's constraint parser
   (outside this hunk): "r" = any general register, "L" = register usable
   in a qemu_ld/st address, "a"/"b" = the fixed register pair required by
   the division instructions, digits alias an output to the same-numbered
   input, and "R" marks shift/rotate count operands.  The uppercase
   immediate classes (K, W, O, X, C) select facility-dependent immediate
   ranges -- NOTE(review): verify exact letter meanings against
   target_parse_constraint() before relying on them.  */
static const TCGTargetOpDef s390_op_defs[] = {
    /* Control flow.  */
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    /* 32-bit moves.  */
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    /* 32-bit host loads/stores.  */
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* 32-bit arithmetic.  Note add takes any register pair, while sub
       and mul require the destination to alias the first input.  */
    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "rK" } },

    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },

    /* 32-bit logicals.  */
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "rWO" } },
    { INDEX_op_xor_i32, { "r", "0", "rWX" } },

    { INDEX_op_neg_i32, { "r", "r" } },

    /* 32-bit shifts require destination == first input; rotates do not.  */
    { INDEX_op_shl_i32, { "r", "0", "Ri" } },
    { INDEX_op_shr_i32, { "r", "0", "Ri" } },
    { INDEX_op_sar_i32, { "r", "0", "Ri" } },

    { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "Ri" } },

    /* Extensions and byte swaps.  */
    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    /* Double-word arithmetic: low/high outputs alias low/high inputs.  */
    { INDEX_op_add2_i32, { "r", "r", "0", "1", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "r", "r" } },

    /* 32-bit compares and conditional ops.  */
    { INDEX_op_brcond_i32, { "r", "rWC" } },
    { INDEX_op_setcond_i32, { "r", "r", "rWC" } },
    { INDEX_op_movcond_i32, { "r", "r", "rWC", "r", "0" } },
    { INDEX_op_deposit_i32, { "r", "0", "r" } },

    /* Guest memory accesses.  */
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },

    /* 64-bit moves.  */
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },

    /* 64-bit host loads/stores.  */
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    /* 64-bit arithmetic.  */
    { INDEX_op_add_i64, { "r", "r", "ri" } },
    { INDEX_op_sub_i64, { "r", "0", "ri" } },
    { INDEX_op_mul_i64, { "r", "0", "rK" } },

    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },

    /* 64-bit logicals.  */
    { INDEX_op_and_i64, { "r", "0", "ri" } },
    { INDEX_op_or_i64, { "r", "0", "rO" } },
    { INDEX_op_xor_i64, { "r", "0", "rX" } },

    { INDEX_op_neg_i64, { "r", "r" } },

    /* 64-bit shifts/rotates (three-operand forms, no aliasing).  */
    { INDEX_op_shl_i64, { "r", "r", "Ri" } },
    { INDEX_op_shr_i64, { "r", "r", "Ri" } },
    { INDEX_op_sar_i64, { "r", "r", "Ri" } },

    { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "Ri" } },

    /* Extensions and byte swaps.  */
    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_add2_i64, { "r", "r", "0", "1", "r", "r" } },
    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "r", "r" } },

    /* 64-bit compares and conditional ops.  */
    { INDEX_op_brcond_i64, { "r", "rC" } },
    { INDEX_op_setcond_i64, { "r", "r", "rC" } },
    { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i64, { "r", "0", "r" } },

    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },

    /* Table terminator.  */
    { -1 },
};
2212
/* ??? Linux kernels provide an AUXV entry AT_HWCAP that provides most of
   this information.  However, getting at that entry is not easy this far
   away from main.  Our options are: start searching from environ, but
   that fails as soon as someone does a setenv in between.  Read the data
   from /proc/self/auxv.  Or do the probing ourselves.  The only thing
   extra that AT_HWCAP gives us is HWCAP_S390_HIGH_GPRS, which indicates
   that the kernel saves all 64 bits of the registers around traps while
   in 31-bit mode.  But this is true of all "recent" kernels (ought to dig
   back and see from when this might not be true).  */
2222
2223#include <signal.h>
2224
/* Set by sigill_handler when a probed instruction traps; sig_atomic_t
   so it is safe to write from the signal handler and read afterward.  */
static volatile sig_atomic_t got_sigill;

/* SIGILL handler installed while probing for optional instructions in
   query_facilities(): records that the probed instruction is absent.  */
static void sigill_handler(int sig)
{
    got_sigill = 1;
}
2231
48bb3750
RH
/* Detect at runtime which optional s390 instruction-set facilities the
   host CPU provides and record them in the global "facilities" bitmask.
   Each facility is probed by executing one representative instruction
   (hand-encoded via .word) under a temporary SIGILL handler: if the
   handler fires, the facility is absent.  Exits the process if a
   facility that the code generator requires unconditionally is missing.  */
static void query_facilities(void)
{
    struct sigaction sa_old, sa_new;
    /* Bind these variables to hardware registers r0/r1, matching the
       register fields hard-wired into the .word encodings below.  */
    register int r0 __asm__("0");
    register void *r1 __asm__("1");
    int fail;

    /* Install the probe handler, saving the previous disposition.  */
    memset(&sa_new, 0, sizeof(sa_new));
    sa_new.sa_handler = sigill_handler;
    sigaction(SIGILL, &sa_new, &sa_old);

    /* First, try STORE FACILITY LIST EXTENDED.  If this is present, then
       we need not do any more probing.  Unfortunately, this itself is an
       extension and the original STORE FACILITY LIST instruction is
       kernel-only, storing its results at absolute address 200.  */
    /* stfle 0(%r1) */
    r1 = &facilities;
    asm volatile(".word 0xb2b0,0x1000"
                 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");

    if (got_sigill) {
        /* STORE FACILITY EXTENDED is not available.  Probe for one of each
           kind of instruction that we're interested in.  */
        /* ??? Possibly some of these are in practice never present unless
           the store-facility-extended facility is also present.  But since
           that isn't documented it's just better to probe for each.  */

        /* Test for z/Architecture.  Required even in 31-bit mode.  */
        got_sigill = 0;
        /* agr %r0,%r0 */
        asm volatile(".word 0xb908,0x0000" : "=r"(r0) : : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_ZARCH_ACTIVE;
        }

        /* Test for long displacement.  */
        got_sigill = 0;
        /* ly %r0,0(%r1) */
        r1 = &facilities;
        asm volatile(".word 0xe300,0x1000,0x0058"
                     : "=r"(r0) : "r"(r1) : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_LONG_DISP;
        }

        /* Test for extended immediates.  */
        got_sigill = 0;
        /* afi %r0,0 */
        /* NOTE(review): this asm (and the msfi probe below) modifies r0
           without declaring an output operand; it appears to rely on r0
           being the register asm variable above -- confirm before
           changing either statement.  */
        asm volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_EXT_IMM;
        }

        /* Test for general-instructions-extension.  */
        got_sigill = 0;
        /* msfi %r0,1 */
        asm volatile(".word 0xc201,0x0000,0x0001");
        if (!got_sigill) {
            facilities |= FACILITY_GEN_INST_EXT;
        }
    }

    /* Probing done; restore the original SIGILL disposition.  */
    sigaction(SIGILL, &sa_old, NULL);

    /* The translator currently uses these extensions unconditionally.
       Pruning this back to the base ESA/390 architecture doesn't seem
       worthwhile, since even the KVM target requires z/Arch.  */
    fail = 0;
    if ((facilities & FACILITY_ZARCH_ACTIVE) == 0) {
        fprintf(stderr, "TCG: z/Arch facility is required.\n");
        fprintf(stderr, "TCG: Boot with a 64-bit enabled kernel.\n");
        fail = 1;
    }
    if ((facilities & FACILITY_LONG_DISP) == 0) {
        fprintf(stderr, "TCG: long-displacement facility is required.\n");
        fail = 1;
    }

    /* So far there's just enough support for 31-bit mode to let the
       compile succeed.  This is good enough to run QEMU with KVM.  */
    if (sizeof(void *) != 8) {
        fprintf(stderr, "TCG: 31-bit mode is not supported.\n");
        fail = 1;
    }

    if (fail) {
        exit(-1);
    }
}
2321
/* One-time backend initialization: verify the required CPU facilities
   are present, then describe the s390 register file (available,
   call-clobbered, and reserved registers) to the common TCG code.  */
static void tcg_target_init(TCGContext *s)
{
    query_facilities();

    /* All 16 GPRs can hold both 32-bit and 64-bit values.  */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);

    /* r0-r5 are the argument/scratch registers, clobbered across calls.  */
    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The return register can be considered call-clobbered. */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    /* Registers the allocator must never hand out: the backend's
       internal temporary, r0, and the stack pointer.  */
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so we better avoid it for now */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* Register the per-opcode operand constraints defined above.  */
    tcg_add_target_add_op_defs(s390_op_defs);
}
2347
/* Emit the prologue/epilogue trampoline that bridges the host C ABI and
   generated TB code.  On entry (host calling convention): the first
   argument register holds the env pointer, the second the TB address to
   jump to.  TBs return control via exit_tb, which lands at tb_ret_addr.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    tcg_target_long frame_size;

    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size: allocate the ABI-mandated save/args area
       plus TCG's temporary spill buffer in one adjustment.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE;
    frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -frame_size);

    /* Tell the register allocator where the spill buffer lives
       relative to the stack pointer.  */
    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* Large guest bases cannot be folded into an address displacement
       (0x80000 -- presumably the 20-bit long-displacement limit; TODO
       confirm), so keep the base live in a dedicated reserved register.  */
    if (GUEST_BASE >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    /* Move the env pointer (first C argument) into AREG0.  */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    /* Exiting TBs branch back to this address.  */
    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 frame_size + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}