]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/s390/tcg-target.c
tcg-s390: Use risbgz for andi
[mirror_qemu.git] / tcg / s390 / tcg-target.c
CommitLineData
2827822e
AG
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
48bb3750
RH
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
2827822e
AG
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
/* We only support generating code for 64-bit mode. */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"
#endif

/* ??? The translation blocks produced by TCG are generally small enough to
   be entirely reachable with a 16-bit displacement. Leaving the option for
   a 32-bit displacement here Just In Case. */
#define USE_LONG_BRANCHES 0

/* Extra operand-constraint flags, OR'd into TCGArgConstraint.ct alongside
   the generic TCG_CT_* bits.  Each selects one immediate class checked in
   tcg_target_const_match(). */
#define TCG_CT_CONST_32 0x0100
#define TCG_CT_CONST_NEG 0x0200
#define TCG_CT_CONST_ADDI 0x0400
#define TCG_CT_CONST_MULI 0x0800
#define TCG_CT_CONST_ORI 0x2000
#define TCG_CT_CONST_XORI 0x4000
#define TCG_CT_CONST_CMPI 0x8000

/* Several places within the instruction set 0 means "no register"
   rather than TCG_REG_R0. */
#define TCG_REG_NONE 0

/* A scratch register that may be used throughout the backend. */
#define TCG_TMP0 TCG_REG_R14

/* R13 holds the guest base address when one is configured; otherwise the
   "no register" slot R0 is used as a placeholder. */
#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R13
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

#ifndef GUEST_BASE
#define GUEST_BASE 0
#endif
62
/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B. */
typedef enum S390Opcode {
    /* RIL: register + 32-bit immediate (48-bit insns). */
    RIL_AFI = 0xc209,
    RIL_AGFI = 0xc208,
    RIL_ALFI = 0xc20b,
    RIL_ALGFI = 0xc20a,
    RIL_BRASL = 0xc005,
    RIL_BRCL = 0xc004,
    RIL_CFI = 0xc20d,
    RIL_CGFI = 0xc20c,
    RIL_CLFI = 0xc20f,
    RIL_CLGFI = 0xc20e,
    RIL_IIHF = 0xc008,
    RIL_IILF = 0xc009,
    RIL_LARL = 0xc000,
    RIL_LGFI = 0xc001,
    RIL_LGRL = 0xc408,
    RIL_LLIHF = 0xc00e,
    RIL_LLILF = 0xc00f,
    RIL_LRL = 0xc40d,
    RIL_MSFI = 0xc201,
    RIL_MSGFI = 0xc200,
    RIL_NIHF = 0xc00a,
    RIL_NILF = 0xc00b,
    RIL_OIHF = 0xc00c,
    RIL_OILF = 0xc00d,
    RIL_SLFI = 0xc205,
    RIL_XIHF = 0xc006,
    RIL_XILF = 0xc007,

    /* RI: register + 16-bit immediate (32-bit insns). */
    RI_AGHI = 0xa70b,
    RI_AHI = 0xa70a,
    RI_BRC = 0xa704,
    RI_IIHH = 0xa500,
    RI_IIHL = 0xa501,
    RI_IILH = 0xa502,
    RI_IILL = 0xa503,
    RI_LGHI = 0xa709,
    RI_LLIHH = 0xa50c,
    RI_LLIHL = 0xa50d,
    RI_LLILH = 0xa50e,
    RI_LLILL = 0xa50f,
    RI_MGHI = 0xa70d,
    RI_MHI = 0xa70c,
    RI_NIHH = 0xa504,
    RI_NIHL = 0xa505,
    RI_NILH = 0xa506,
    RI_NILL = 0xa507,
    RI_OIHH = 0xa508,
    RI_OIHL = 0xa509,
    RI_OILH = 0xa50a,
    RI_OILL = 0xa50b,

    /* RIE: compare-and-branch and rotate/insert extensions. */
    RIE_CGIJ = 0xec7c,
    RIE_CGRJ = 0xec64,
    RIE_CIJ = 0xec7e,
    RIE_CLGRJ = 0xec65,
    RIE_CLIJ = 0xec7f,
    RIE_CLGIJ = 0xec7d,
    RIE_CLRJ = 0xec77,
    RIE_CRJ = 0xec76,
    RIE_RISBG = 0xec55,

    /* RRE: register-register, extended opcode field. */
    RRE_AGR = 0xb908,
    RRE_ALGR = 0xb90a,
    RRE_ALCR = 0xb998,
    RRE_ALCGR = 0xb988,
    RRE_CGR = 0xb920,
    RRE_CLGR = 0xb921,
    RRE_DLGR = 0xb987,
    RRE_DLR = 0xb997,
    RRE_DSGFR = 0xb91d,
    RRE_DSGR = 0xb90d,
    RRE_LGBR = 0xb906,
    RRE_LCGR = 0xb903,
    RRE_LGFR = 0xb914,
    RRE_LGHR = 0xb907,
    RRE_LGR = 0xb904,
    RRE_LLGCR = 0xb984,
    RRE_LLGFR = 0xb916,
    RRE_LLGHR = 0xb985,
    RRE_LRVR = 0xb91f,
    RRE_LRVGR = 0xb90f,
    RRE_LTGR = 0xb902,
    RRE_MLGR = 0xb986,
    RRE_MSGR = 0xb90c,
    RRE_MSR = 0xb252,
    RRE_NGR = 0xb980,
    RRE_OGR = 0xb981,
    RRE_SGR = 0xb909,
    RRE_SLGR = 0xb90b,
    RRE_SLBR = 0xb999,
    RRE_SLBGR = 0xb989,
    RRE_XGR = 0xb982,

    /* RRF: register-register with an additional mask/register field. */
    RRF_LOCR = 0xb9f2,
    RRF_LOCGR = 0xb9e2,

    /* RR: classic 16-bit register-register insns. */
    RR_AR = 0x1a,
    RR_ALR = 0x1e,
    RR_BASR = 0x0d,
    RR_BCR = 0x07,
    RR_CLR = 0x15,
    RR_CR = 0x19,
    RR_DR = 0x1d,
    RR_LCR = 0x13,
    RR_LR = 0x18,
    RR_LTR = 0x12,
    RR_NR = 0x14,
    RR_OR = 0x16,
    RR_SR = 0x1b,
    RR_SLR = 0x1f,
    RR_XR = 0x17,

    /* RSY: register-storage with 20-bit displacement. */
    RSY_RLL = 0xeb1d,
    RSY_RLLG = 0xeb1c,
    RSY_SLLG = 0xeb0d,
    RSY_SRAG = 0xeb0a,
    RSY_SRLG = 0xeb0c,

    /* RS: register-storage with 12-bit displacement. */
    RS_SLL = 0x89,
    RS_SRA = 0x8a,
    RS_SRL = 0x88,

    /* RXY: register-indexed-storage with 20-bit displacement. */
    RXY_AG = 0xe308,
    RXY_AY = 0xe35a,
    RXY_CG = 0xe320,
    RXY_CY = 0xe359,
    RXY_LB = 0xe376,
    RXY_LG = 0xe304,
    RXY_LGB = 0xe377,
    RXY_LGF = 0xe314,
    RXY_LGH = 0xe315,
    RXY_LHY = 0xe378,
    RXY_LLGC = 0xe390,
    RXY_LLGF = 0xe316,
    RXY_LLGH = 0xe391,
    RXY_LMG = 0xeb04,
    RXY_LRV = 0xe31e,
    RXY_LRVG = 0xe30f,
    RXY_LRVH = 0xe31f,
    RXY_LY = 0xe358,
    RXY_STCY = 0xe372,
    RXY_STG = 0xe324,
    RXY_STHY = 0xe370,
    RXY_STMG = 0xeb24,
    RXY_STRV = 0xe33e,
    RXY_STRVG = 0xe32f,
    RXY_STRVH = 0xe33f,
    RXY_STY = 0xe350,

    /* RX: register-indexed-storage with 12-bit displacement. */
    RX_A = 0x5a,
    RX_C = 0x59,
    RX_L = 0x58,
    RX_LH = 0x48,
    RX_ST = 0x50,
    RX_STC = 0x42,
    RX_STH = 0x40,
} S390Opcode;
225
/* Size/sign codes for memory accesses: the low two bits are log2 of the
   access size, and LD_SIGNED flags a sign-extending load. */
#define LD_SIGNED 0x04
#define LD_UINT8 0x00
#define LD_INT8 (LD_UINT8 | LD_SIGNED)
#define LD_UINT16 0x01
#define LD_INT16 (LD_UINT16 | LD_SIGNED)
#define LD_UINT32 0x02
#define LD_INT32 (LD_UINT32 | LD_SIGNED)
#define LD_UINT64 0x03
#define LD_INT64 (LD_UINT64 | LD_SIGNED)
235
236#ifndef NDEBUG
237static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
238 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
239 "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
240};
241#endif
242
/* Since R6 is a potential argument register, choose it last of the
   call-saved registers. Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments. */
static const int tcg_target_reg_alloc_order[] = {
    /* Call-saved registers first, R6 last among them. */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call-clobbered registers, argument registers (R2..R5) last. */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};
263
/* Integer argument registers of the s390x C calling convention. */
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

/* Integer return-value register of the s390x C calling convention. */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};
275
/* Branch-condition masks for BRC/BCR: a 4-bit mask over the s390
   condition codes; a branch is taken if the current CC's bit is set. */
#define S390_CC_EQ 8
#define S390_CC_LT 4
#define S390_CC_GT 2
#define S390_CC_OV 1
#define S390_CC_NE (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER 0
#define S390_CC_ALWAYS 15

/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ] = S390_CC_EQ,
    [TCG_COND_NE] = S390_CC_NE,
    [TCG_COND_LT] = S390_CC_LT,
    [TCG_COND_LE] = S390_CC_LE,
    [TCG_COND_GT] = S390_CC_GT,
    [TCG_COND_GE] = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};

/* Condition codes that result from a LOAD AND TEST. Here, we have no
   unsigned instruction variation, however since the test is vs zero we
   can re-map the outcomes appropriately (e.g. unsigned "< 0" is never
   true, unsigned ">= 0" is always true). */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ] = S390_CC_EQ,
    [TCG_COND_NE] = S390_CC_NE,
    [TCG_COND_LT] = S390_CC_LT,
    [TCG_COND_LE] = S390_CC_LE,
    [TCG_COND_GT] = S390_CC_GT,
    [TCG_COND_GE] = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};
315
#ifdef CONFIG_SOFTMMU

#include "exec/softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx).  Indexed by log2 of the access size. */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx).  Indexed by log2 of the access size. */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
#endif
339static uint8_t *tb_ret_addr;
340
341/* A list of relevant facilities used by this translator. Some of these
342 are required for proper operation, and these are checked at startup. */
343
344#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
345#define FACILITY_LONG_DISP (1ULL << (63 - 18))
346#define FACILITY_EXT_IMM (1ULL << (63 - 21))
347#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
96a9f093 348#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
48bb3750
RH
349
350static uint64_t facilities;
2827822e
AG
351
352static void patch_reloc(uint8_t *code_ptr, int type,
48bb3750 353 tcg_target_long value, tcg_target_long addend)
2827822e 354{
48bb3750
RH
355 tcg_target_long code_ptr_tl = (tcg_target_long)code_ptr;
356 tcg_target_long pcrel2;
357
358 /* ??? Not the usual definition of "addend". */
359 pcrel2 = (value - (code_ptr_tl + addend)) >> 1;
360
361 switch (type) {
362 case R_390_PC16DBL:
363 assert(pcrel2 == (int16_t)pcrel2);
364 *(int16_t *)code_ptr = pcrel2;
365 break;
366 case R_390_PC32DBL:
367 assert(pcrel2 == (int32_t)pcrel2);
368 *(int32_t *)code_ptr = pcrel2;
369 break;
370 default:
371 tcg_abort();
372 break;
373 }
2827822e
AG
374}
375
2827822e
AG
/* Parse one target-specific constraint letter from *pct_str into CT,
   advancing *pct_str past it.  Returns 0 on success, -1 for an
   unrecognized letter. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':                  /* all registers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'R':                  /* not R0 */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;
    case 'L':                  /* qemu_ld/st constraint */
        /* R2/R3 are excluded: they carry the helper-call arguments. */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
        break;
    case 'a':                  /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b':                  /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'N':                  /* force immediate negate */
        ct->ct |= TCG_CT_CONST_NEG;
        break;
    case 'W':                  /* force 32-bit ("word") immediate */
        ct->ct |= TCG_CT_CONST_32;
        break;
    case 'I':                  /* immediate usable by add */
        ct->ct |= TCG_CT_CONST_ADDI;
        break;
    case 'K':                  /* immediate usable by multiply */
        ct->ct |= TCG_CT_CONST_MULI;
        break;
    case 'O':                  /* immediate usable by or */
        ct->ct |= TCG_CT_CONST_ORI;
        break;
    case 'X':                  /* immediate usable by xor */
        ct->ct |= TCG_CT_CONST_XORI;
        break;
    case 'C':                  /* immediate usable by compare */
        ct->ct |= TCG_CT_CONST_CMPI;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
436
48bb3750
RH
437/* Immediates to be used with logical OR. This is an optimization only,
438 since a full 64-bit immediate OR can always be performed with 4 sequential
439 OI[LH][LH] instructions. What we're looking for is immediates that we
440 can load efficiently, and the immediate load plus the reg-reg OR is
441 smaller than the sequential OI's. */
442
443static int tcg_match_ori(int ct, tcg_target_long val)
444{
445 if (facilities & FACILITY_EXT_IMM) {
446 if (ct & TCG_CT_CONST_32) {
447 /* All 32-bit ORs can be performed with 1 48-bit insn. */
448 return 1;
449 }
450 }
451
452 /* Look for negative values. These are best to load with LGHI. */
453 if (val < 0) {
454 if (val == (int16_t)val) {
455 return 0;
456 }
457 if (facilities & FACILITY_EXT_IMM) {
458 if (val == (int32_t)val) {
459 return 0;
460 }
461 }
462 }
463
464 return 1;
465}
466
467/* Immediates to be used with logical XOR. This is almost, but not quite,
468 only an optimization. XOR with immediate is only supported with the
469 extended-immediate facility. That said, there are a few patterns for
470 which it is better to load the value into a register first. */
471
472static int tcg_match_xori(int ct, tcg_target_long val)
473{
474 if ((facilities & FACILITY_EXT_IMM) == 0) {
475 return 0;
476 }
477
478 if (ct & TCG_CT_CONST_32) {
479 /* All 32-bit XORs can be performed with 1 48-bit insn. */
480 return 1;
481 }
482
483 /* Look for negative values. These are best to load with LGHI. */
484 if (val < 0 && val == (int32_t)val) {
485 return 0;
486 }
487
488 return 1;
489}
490
491/* Imediates to be used with comparisons. */
492
493static int tcg_match_cmpi(int ct, tcg_target_long val)
494{
495 if (facilities & FACILITY_EXT_IMM) {
496 /* The COMPARE IMMEDIATE instruction is available. */
497 if (ct & TCG_CT_CONST_32) {
498 /* We have a 32-bit immediate and can compare against anything. */
499 return 1;
500 } else {
501 /* ??? We have no insight here into whether the comparison is
502 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
503 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
504 a 32-bit unsigned immediate. If we were to use the (semi)
505 obvious "val == (int32_t)val" we would be enabling unsigned
506 comparisons vs very large numbers. The only solution is to
507 take the intersection of the ranges. */
508 /* ??? Another possible solution is to simply lie and allow all
509 constants here and force the out-of-range values into a temp
510 register in tgen_cmp when we have knowledge of the actual
511 comparison code in use. */
512 return val >= 0 && val <= 0x7fffffff;
513 }
514 } else {
515 /* Only the LOAD AND TEST instruction is available. */
516 return val == 0;
517 }
518}
519
2827822e 520/* Test if a constant matches the constraint. */
48bb3750
RH
521static int tcg_target_const_match(tcg_target_long val,
522 const TCGArgConstraint *arg_ct)
2827822e 523{
48bb3750
RH
524 int ct = arg_ct->ct;
525
526 if (ct & TCG_CT_CONST) {
527 return 1;
528 }
529
530 /* Handle the modifiers. */
531 if (ct & TCG_CT_CONST_NEG) {
532 val = -val;
533 }
534 if (ct & TCG_CT_CONST_32) {
535 val = (int32_t)val;
536 }
537
538 /* The following are mutually exclusive. */
539 if (ct & TCG_CT_CONST_ADDI) {
540 /* Immediates that may be used with add. If we have the
541 extended-immediates facility then we have ADD IMMEDIATE
542 with signed and unsigned 32-bit, otherwise we have only
543 ADD HALFWORD IMMEDIATE with a signed 16-bit. */
544 if (facilities & FACILITY_EXT_IMM) {
545 return val == (int32_t)val || val == (uint32_t)val;
546 } else {
547 return val == (int16_t)val;
548 }
549 } else if (ct & TCG_CT_CONST_MULI) {
550 /* Immediates that may be used with multiply. If we have the
551 general-instruction-extensions, then we have MULTIPLY SINGLE
552 IMMEDIATE with a signed 32-bit, otherwise we have only
553 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
554 if (facilities & FACILITY_GEN_INST_EXT) {
555 return val == (int32_t)val;
556 } else {
557 return val == (int16_t)val;
558 }
48bb3750
RH
559 } else if (ct & TCG_CT_CONST_ORI) {
560 return tcg_match_ori(ct, val);
561 } else if (ct & TCG_CT_CONST_XORI) {
562 return tcg_match_xori(ct, val);
563 } else if (ct & TCG_CT_CONST_CMPI) {
564 return tcg_match_cmpi(ct, val);
565 }
566
2827822e
AG
567 return 0;
568}
569
/* Emit instructions according to the given instruction format. */

/* RR: op(8) r1(4) r2(4) -- 16-bit insn. */
static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

/* RRE: op(16) unused(8) r1(4) r2(4) -- 32-bit insn. */
static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

/* RRF: op(16) m3(4) unused(4) r1(4) r2(4) -- 32-bit insn. */
static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

/* RI: op-high(8) r1(4) op-low(4) i2(16); the opcode value already has r1's
   nibble position cleared, so OR-ing r1 at bit 20 composes correctly. */
static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

/* RIL: op-high(8) r1(4) op-low(4) i2(32) -- 48-bit insn. */
static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

/* RS: op(8) r1(4) r3(4) b2(4) d2(12) -- 32-bit insn. */
static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

/* RSY: like RS but with a 20-bit displacement split into low-12 and
   high-8 parts, and the opcode split across the insn. */
static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

/* RX/RXY share the field layout of RS/RSY with r3 acting as the index. */
#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

/* Emit an opcode with "type-checking" of the format. */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
622
/* emit 64-bit shifts; SH_REG and SH_IMM together form the shift amount
   (base register + displacement, either may be zero/none). */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts; these operate on DEST in place. */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}

/* Register-to-register move of the given TYPE; a same-register move
   emits nothing. */
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src != dst) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, LR, dst, src);
        } else {
            tcg_out_insn(s, RRE, LGR, dst, src);
        }
    }
}
647
2827822e 648/* load a register with an immediate value */
48bb3750
RH
649static void tcg_out_movi(TCGContext *s, TCGType type,
650 TCGReg ret, tcg_target_long sval)
2827822e 651{
48bb3750
RH
652 static const S390Opcode lli_insns[4] = {
653 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
654 };
655
656 tcg_target_ulong uval = sval;
657 int i;
658
659 if (type == TCG_TYPE_I32) {
660 uval = (uint32_t)sval;
661 sval = (int32_t)sval;
662 }
663
664 /* Try all 32-bit insns that can load it in one go. */
665 if (sval >= -0x8000 && sval < 0x8000) {
666 tcg_out_insn(s, RI, LGHI, ret, sval);
667 return;
668 }
669
670 for (i = 0; i < 4; i++) {
671 tcg_target_long mask = 0xffffull << i*16;
672 if ((uval & mask) == uval) {
673 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
674 return;
675 }
676 }
677
678 /* Try all 48-bit insns that can load it in one go. */
679 if (facilities & FACILITY_EXT_IMM) {
680 if (sval == (int32_t)sval) {
681 tcg_out_insn(s, RIL, LGFI, ret, sval);
682 return;
683 }
684 if (uval <= 0xffffffff) {
685 tcg_out_insn(s, RIL, LLILF, ret, uval);
686 return;
687 }
688 if ((uval & 0xffffffff) == 0) {
689 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
690 return;
691 }
692 }
693
694 /* Try for PC-relative address load. */
695 if ((sval & 1) == 0) {
696 intptr_t off = (sval - (intptr_t)s->code_ptr) >> 1;
697 if (off == (int32_t)off) {
698 tcg_out_insn(s, RIL, LARL, ret, off);
699 return;
700 }
701 }
702
703 /* If extended immediates are not present, then we may have to issue
704 several instructions to load the low 32 bits. */
705 if (!(facilities & FACILITY_EXT_IMM)) {
706 /* A 32-bit unsigned value can be loaded in 2 insns. And given
707 that the lli_insns loop above did not succeed, we know that
708 both insns are required. */
709 if (uval <= 0xffffffff) {
710 tcg_out_insn(s, RI, LLILL, ret, uval);
711 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
712 return;
713 }
714
715 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
716 We first want to make sure that all the high bits get set. With
717 luck the low 16-bits can be considered negative to perform that for
718 free, otherwise we load an explicit -1. */
719 if (sval >> 31 >> 1 == -1) {
720 if (uval & 0x8000) {
721 tcg_out_insn(s, RI, LGHI, ret, uval);
722 } else {
723 tcg_out_insn(s, RI, LGHI, ret, -1);
724 tcg_out_insn(s, RI, IILL, ret, uval);
725 }
726 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
727 return;
728 }
729 }
730
731 /* If we get here, both the high and low parts have non-zero bits. */
732
733 /* Recurse to load the lower 32-bits. */
a22971f9 734 tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);
48bb3750
RH
735
736 /* Insert data into the high 32-bits. */
737 uval = uval >> 31 >> 1;
738 if (facilities & FACILITY_EXT_IMM) {
739 if (uval < 0x10000) {
740 tcg_out_insn(s, RI, IIHL, ret, uval);
741 } else if ((uval & 0xffff) == 0) {
742 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
743 } else {
744 tcg_out_insn(s, RIL, IIHF, ret, uval);
745 }
746 } else {
747 if (uval & 0xffff) {
748 tcg_out_insn(s, RI, IIHL, ret, uval);
749 }
750 if (uval & 0xffff0000) {
751 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
752 }
753 }
754}
755
756
/* Emit a load/store type instruction. Inputs are:
   DATA: The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY: The RXY format opcode for the operation (e.g. STCY).
   Offsets outside the signed 20-bit RXY range are materialized into
   TCG_TMP0 (which therefore gets clobbered in that case). */
static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 16 bits of the offset with the actual load insn;
           the high 48 bits must come from an immediate load. */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs & ~0xffff);
        ofs &= 0xffff;

        /* If we were already given an index register, add it in. */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    /* Prefer the short RX form when the offset fits its unsigned 12 bits. */
    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
786
/* load data without address translation or endianness conversion */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, tcg_target_long ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
    }
}

/* store data without address translation or endianness conversion */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, tcg_target_long ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
    }
}
808
/* load data from an absolute host address.  Uses a PC-relative LRL/LGRL
   when the address is in range (requires general-instruction-extensions),
   otherwise loads the address into DEST and loads through it. */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    tcg_target_long addr = (tcg_target_long)abs;

    if (facilities & FACILITY_GEN_INST_EXT) {
        tcg_target_long disp = (addr - (tcg_target_long)s->code_ptr) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }

    /* Split the address: high 48 bits via movi, low 16 as load offset. */
    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}
829
/* Emit RISBG (rotate then insert selected bits): rotate SRC left by OFS,
   insert bit range MSB..LSB into DEST; Z set means zero the remaining
   bits of DEST instead of preserving them. */
static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f: op-high(8) r1(4) r2(4) i3(8) z(1)|i4(7) i5(8) op-low(8) */
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}
838
/* Sign-extend the low 8 bits of SRC into DEST.  Without extended
   immediates there is no LGBR, so shift left then arithmetic-shift
   right instead. */
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}

/* Zero-extend the low 8 bits of SRC into DEST.  Without LLGCR, AND with
   0xff via a mask built in TCG_TMP0 (or directly in DEST). */
static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

/* Sign-extend the low 16 bits of SRC into DEST (LGHR or shift pair). */
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}

/* Zero-extend the low 16 bits of SRC into DEST (LLGHR or AND 0xffff). */
static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

/* Sign-extend the low 32 bits of SRC into DEST. */
static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

/* Zero-extend the low 32 bits of SRC into DEST. */
static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}
928
929static inline void tgen32_addi(TCGContext *s, TCGReg dest, int32_t val)
930{
931 if (val == (int16_t)val) {
932 tcg_out_insn(s, RI, AHI, dest, val);
933 } else {
934 tcg_out_insn(s, RIL, AFI, dest, val);
935 }
936}
937
938static inline void tgen64_addi(TCGContext *s, TCGReg dest, int64_t val)
939{
940 if (val == (int16_t)val) {
941 tcg_out_insn(s, RI, AGHI, dest, val);
942 } else if (val == (int32_t)val) {
943 tcg_out_insn(s, RIL, AGFI, dest, val);
944 } else if (val == (uint32_t)val) {
945 tcg_out_insn(s, RIL, ALGFI, dest, val);
946 } else {
947 tcg_abort();
948 }
949
950}
951
/* Accept bit patterns like these:
      0....01....1
      1....10....0
      1..10..01..1
      0..01..10..0
   i.e. a single contiguous run of one-bits, possibly wrapping around
   the ends of the word -- exactly the masks RISBG can select.
   Copied from gcc sources. */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t first;

    /* Inverting doesn't change the number of 0<->1 transitions, so
       normalize to a value whose least significant bit is zero. */
    if (c & 1) {
        c = ~c;
    }
    /* All zeros (and, post-normalization, all ones) are rejected. */
    if (c == 0) {
        return false;
    }
    /* Isolate the lowest set bit: the first transition. */
    first = c & -c;
    /* Flip the value and erase everything at or below that transition,
       so only bits beyond a possible second transition survive. */
    c = ~c & -first;
    /* Match when what remains is a solid run up to the top (equal to
       the negation of its own lowest set bit) -- or nothing at all. */
    return c == -(c & -c);
}
981
/* Emit DEST &= VAL in place.  For TCG_TYPE_I32 only the low 32 bits of
   VAL are significant ("valid" masks out the rest).  Tries, in order:
   dedicated zero-extension insns, a single NI/NIF immediate, a RISBG
   bit-select, and finally a constant load into TCG_TMP0 plus NR/NGR
   (clobbering TCG_TMP0). */
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions. */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }
    if (facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go: a single NI
       insn works when only one 16-bit halfword has zero bits (bits
       outside "valid" count as don't-care ones). */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go: likewise for
       a single 32-bit half via NILF/NIHF. */
    if (facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }
    /* A contiguous (possibly wrapping) run of ones can be done with one
       RISBG selecting bits msb..lsb and zeroing the rest. */
    if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        int msb, lsb;
        if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
            /* Achieve wraparound by swapping msb and lsb. */
            msb = 63 - ctz64(~val);
            lsb = clz64(~val) + 1;
        } else {
            msb = clz64(val);
            lsb = 63 - ctz64(val);
        }
        tcg_out_risbg(s, dest, dest, msb, lsb, 0, 1);
        return;
    }

    /* Fall back to loading the constant. */
    tcg_out_movi(s, type, TCG_TMP0, val);
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}
1050
1051static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1052{
1053 static const S390Opcode oi_insns[4] = {
1054 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1055 };
1056 static const S390Opcode nif_insns[2] = {
1057 RIL_OILF, RIL_OIHF
1058 };
1059
1060 int i;
1061
1062 /* Look for no-op. */
1063 if (val == 0) {
1064 return;
1065 }
1066
1067 if (facilities & FACILITY_EXT_IMM) {
1068 /* Try all 32-bit insns that can perform it in one go. */
1069 for (i = 0; i < 4; i++) {
1070 tcg_target_ulong mask = (0xffffull << i*16);
1071 if ((val & mask) != 0 && (val & ~mask) == 0) {
1072 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1073 return;
1074 }
1075 }
1076
1077 /* Try all 48-bit insns that can perform it in one go. */
1078 for (i = 0; i < 2; i++) {
1079 tcg_target_ulong mask = (0xffffffffull << i*32);
1080 if ((val & mask) != 0 && (val & ~mask) == 0) {
1081 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1082 return;
1083 }
1084 }
1085
1086 /* Perform the OR via sequential modifications to the high and
1087 low parts. Do this via recursion to handle 16-bit vs 32-bit
1088 masks in each half. */
1089 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1090 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1091 } else {
1092 /* With no extended-immediate facility, we don't need to be so
1093 clever. Just iterate over the insns and mask in the constant. */
1094 for (i = 0; i < 4; i++) {
1095 tcg_target_ulong mask = (0xffffull << i*16);
1096 if ((val & mask) != 0) {
1097 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1098 }
1099 }
1100 }
1101}
1102
1103static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1104{
1105 /* Perform the xor by parts. */
1106 if (val & 0xffffffff) {
1107 tcg_out_insn(s, RIL, XILF, dest, val);
1108 }
1109 if (val > 0xffffffff) {
1110 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1111 }
1112}
1113
/* Emit a comparison of R1 against C2 (a register, or an immediate when
   C2CONST is set) for TCG condition C, and return the S390 condition-code
   mask that the following branch or conditional load must test.  */
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, int c2const)
{
    bool is_unsigned = is_unsigned_cond(c);
    if (c2const) {
        if (c2 == 0) {
            /* Compare against zero via load-and-test, which sets the
               condition code differently than a compare; use the
               LTR-specific cc mapping for the return value.  */
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LTR, r1, r1);
            } else {
                tcg_out_insn(s, RRE, LTGR, r1, r1);
            }
            return tcg_cond_to_ltr_cond[c];
        } else {
            /* Immediate compare, logical (unsigned) or arithmetic.
               NOTE(review): the 64-bit forms CLGFI/CGFI extend a 32-bit
               immediate -- this assumes c2 fits in 32 bits, presumably
               guaranteed by the operand constraint; confirm.  */
            if (is_unsigned) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
                }
            } else {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
                }
            }
        }
    } else {
        /* Register-register compare.  */
        if (is_unsigned) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CLR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CLGR, r1, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CGR, r1, c2);
            }
        }
    }
    return tcg_cond_to_s390_cond[c];
}
1158
/* Materialize the boolean result (0 or 1) of (C1 cond C2) into DEST.  */
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond c,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc = tgen_cmp(s, type, c, c1, c2, c2const);

    /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over:
       The BRC displacement is in halfwords and skips the second movi:
       (4 + 4) >> 1 covers the 4-byte BRC plus a 4-byte load.
       NOTE(review): this assumes tcg_out_movi emits a single 4-byte insn
       for 0 and 1, and that it does not clobber the condition code
       between the compare and the branch -- confirm.  */
    tcg_out_movi(s, type, dest, 1);
    tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
    tcg_out_movi(s, type, dest, 0);
}
1169
96a9f093
RH
/* Emit DEST = (C1 cond C2) ? R3 : DEST, i.e. a conditional move.  */
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
{
    int cc;
    if (facilities & FACILITY_LOAD_ON_COND) {
        /* Branchless form: LOCGR copies r3 into dest when cc holds.  */
        cc = tgen_cmp(s, type, c, c1, c2, c2const);
        tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
    } else {
        /* No load-on-condition facility: invert the condition and
           branch around the unconditional register move.  */
        c = tcg_invert_cond(c);
        cc = tgen_cmp(s, type, c, c1, c2, c2const);

        /* Emit: if (cc) goto over; dest = r3; over:
           Displacement is in halfwords: 4-byte BRC + 4-byte LGR.  */
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_insn(s, RRE, LGR, dest, r3);
    }
}
1186
d5690ea4
RH
1187bool tcg_target_deposit_valid(int ofs, int len)
1188{
1189 return (facilities & FACILITY_GEN_INST_EXT) != 0;
1190}
1191
1192static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1193 int ofs, int len)
1194{
1195 int lsb = (63 - ofs);
1196 int msb = lsb - (len - 1);
f0bffc27 1197 tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
d5690ea4
RH
1198}
1199
48bb3750
RH
1200static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
1201{
1202 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1203 if (off > -0x8000 && off < 0x7fff) {
1204 tcg_out_insn(s, RI, BRC, cc, off);
1205 } else if (off == (int32_t)off) {
1206 tcg_out_insn(s, RIL, BRCL, cc, off);
1207 } else {
1208 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1209 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1210 }
1211}
1212
/* Emit a branch with condition-code mask CC to TCG label LABELNO.  */
static void tgen_branch(TCGContext *s, int cc, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    if (l->has_value) {
        /* Backward branch: the target address is already known.  */
        tgen_gotoi(s, cc, l->u.value);
    } else if (USE_LONG_BRANCHES) {
        /* Forward branch with a 32-bit displacement.  Emit only the
           2-byte opcode here; the relocation fills in the displacement
           field that the code_ptr bump reserves.  */
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2);
        s->code_ptr += 4;
    } else {
        /* Forward branch with a 16-bit displacement, patched later.  */
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2);
        s->code_ptr += 2;
    }
}
1228
/* Emit a RIE-format compare-and-branch-relative insn OPC comparing
   registers R1 and R2, branching to LABELNO when condition mask CC holds.  */
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    tcg_target_long off;

    if (l->has_value) {
        /* Known target: halfword-scaled pc-relative displacement.
           NOTE(review): no range check -- assumes the label fits the
           16-bit field; confirm against translation-block sizing.  */
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
    } else {
        /* We need to keep the offset unchanged for retranslation:
           re-emit whatever displacement is already present in the
           buffer and register a reloc to patch it later.  */
        off = ((int16_t *)s->code_ptr)[1];
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
    }

    /* RIE layout: op-high|r1|r2, 16-bit displacement, cc|op-low.  */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}
1247
/* Emit a RIE-format compare-immediate-and-branch-relative insn OPC
   comparing R1 with the 8-bit immediate I2, branching to LABELNO when
   condition mask CC holds.  */
static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    tcg_target_long off;

    if (l->has_value) {
        /* Known target: halfword-scaled pc-relative displacement.  */
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
    } else {
        /* We need to keep the offset unchanged for retranslation:
           reuse the displacement already in the buffer and patch via
           relocation.  */
        off = ((int16_t *)s->code_ptr)[1];
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
    }

    /* RIE layout: op-high|r1|cc, 16-bit displacement, i2|op-low.  */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}
1266
1267static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1268 TCGReg r1, TCGArg c2, int c2const, int labelno)
1269{
1270 int cc;
1271
1272 if (facilities & FACILITY_GEN_INST_EXT) {
1273 bool is_unsigned = (c > TCG_COND_GT);
1274 bool in_range;
1275 S390Opcode opc;
1276
1277 cc = tcg_cond_to_s390_cond[c];
1278
1279 if (!c2const) {
1280 opc = (type == TCG_TYPE_I32
1281 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1282 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
1283 tgen_compare_branch(s, opc, cc, r1, c2, labelno);
1284 return;
1285 }
1286
1287 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1288 If the immediate we've been given does not fit that range, we'll
1289 fall back to separate compare and branch instructions using the
1290 larger comparison range afforded by COMPARE IMMEDIATE. */
1291 if (type == TCG_TYPE_I32) {
1292 if (is_unsigned) {
1293 opc = RIE_CLIJ;
1294 in_range = (uint32_t)c2 == (uint8_t)c2;
1295 } else {
1296 opc = RIE_CIJ;
1297 in_range = (int32_t)c2 == (int8_t)c2;
1298 }
1299 } else {
1300 if (is_unsigned) {
1301 opc = RIE_CLGIJ;
1302 in_range = (uint64_t)c2 == (uint8_t)c2;
1303 } else {
1304 opc = RIE_CGIJ;
1305 in_range = (int64_t)c2 == (int8_t)c2;
1306 }
1307 }
1308 if (in_range) {
1309 tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
1310 return;
1311 }
1312 }
1313
1314 cc = tgen_cmp(s, type, c, r1, c2, c2const);
1315 tgen_branch(s, cc, labelno);
1316}
1317
1318static void tgen_calli(TCGContext *s, tcg_target_long dest)
1319{
1320 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1321 if (off == (int32_t)off) {
1322 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1323 } else {
1324 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1325 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1326 }
1327}
1328
/* Emit the guest-memory load for access type OPC into DATA from the
   host address BASE+INDEX+DISP, byte-swapping when the guest is
   little-endian (the host is big-endian).  */
static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif
    switch (opc) {
    case LD_UINT8:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case LD_INT8:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;
    case LD_UINT16:
        if (bswap) {
            /* swapped unsigned halfword load with upper bits zeroed */
            /* LRVH only writes the low 16 bits; clear the rest.  */
            tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
            tgen_ext16u(s, TCG_TYPE_I64, data, data);
        } else {
            tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        }
        break;
    case LD_INT16:
        if (bswap) {
            /* swapped sign-extended halfword load */
            tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
            tgen_ext16s(s, TCG_TYPE_I64, data, data);
        } else {
            tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        }
        break;
    case LD_UINT32:
        if (bswap) {
            /* swapped unsigned int load with upper bits zeroed */
            tcg_out_insn(s, RXY, LRV, data, base, index, disp);
            tgen_ext32u(s, data, data);
        } else {
            tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        }
        break;
    case LD_INT32:
        if (bswap) {
            /* swapped sign-extended int load */
            tcg_out_insn(s, RXY, LRV, data, base, index, disp);
            tgen_ext32s(s, data, data);
        } else {
            tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        }
        break;
    case LD_UINT64:
        if (bswap) {
            tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, LG, data, base, index, disp);
        }
        break;
    default:
        /* No other access sizes exist.  */
        tcg_abort();
    }
}
1391
/* Emit the guest-memory store of DATA for access type OPC to the host
   address BASE+INDEX+DISP, byte-swapping when the guest is little-endian.
   The short RX forms are only valid for unsigned 12-bit displacements;
   otherwise the long-displacement RXY forms are used.  */
static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif
    switch (opc) {
    case LD_UINT8:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;
    case LD_UINT16:
        if (bswap) {
            /* Byte-reversing stores exist only in RXY format.  */
            tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        } else if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;
    case LD_UINT32:
        if (bswap) {
            tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        } else if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;
    case LD_UINT64:
        if (bswap) {
            tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STG, data, base, index, disp);
        }
        break;
    default:
        /* Stores only come in the unsigned sizes.  */
        tcg_abort();
    }
}
1437
1438#if defined(CONFIG_SOFTMMU)
48bb3750
RH
/* Emit the softmmu TLB lookup for a guest access of type OPC at
   ADDR_REG.  On a TLB hit, fall through with the translated host
   address left in R2.  On a miss, call the appropriate qemu_ld/st
   helper (which for loads leaves the result in data_reg) and branch
   past the inline access; the branch to patch for that skip is
   returned via *LABEL2_PTR_P and finished by tcg_finish_qemu_ldst.  */
static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
                                  TCGReg addr_reg, int mem_index, int opc,
                                  uint16_t **label2_ptr_p, int is_store)
{
    const TCGReg arg0 = TCG_REG_R2;
    const TCGReg arg1 = TCG_REG_R3;
    int s_bits = opc & 3;           /* log2 of the access size */
    uint16_t *label1_ptr;
    tcg_target_long ofs;

    /* arg0 = guest address (zero-extended for 32-bit guests).  */
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, arg0, addr_reg);
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, arg0, addr_reg);
    }

    /* arg1 = TLB index, derived from the page number.  */
    tcg_out_sh64(s, RSY_SRLG, arg1, addr_reg, TCG_REG_NONE,
                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* Mask arg0 down to page|alignment bits for the tag compare, and
       arg1 down to a byte offset into the TLB table.  */
    tgen_andi(s, TCG_TYPE_I64, arg0, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tgen_andi(s, TCG_TYPE_I64, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    if (is_store) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    }
    /* The compare below uses a 20-bit signed displacement.  */
    assert(ofs < 0x80000);

    /* Compare the masked address against the TLB entry's tag.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, arg0, arg1, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, arg0, arg1, TCG_AREG0, ofs);
    }

    /* Reload the full guest address into arg0: the tag compare
       clobbered the masked copy, and both the slow-path helper and the
       addend addition below need the original address.  */
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, arg0, addr_reg);
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, arg0, addr_reg);
    }

    label1_ptr = (uint16_t*)s->code_ptr;

    /* je label1 (offset will be patched in later) */
    tcg_out_insn(s, RI, BRC, S390_CC_EQ, 0);

    /* call load/store helper */
    if (is_store) {
        /* Make sure to zero-extend the value to the full register
           for the calling convention.  */
        switch (opc) {
        case LD_UINT8:
            tgen_ext8u(s, TCG_TYPE_I64, arg1, data_reg);
            break;
        case LD_UINT16:
            tgen_ext16u(s, TCG_TYPE_I64, arg1, data_reg);
            break;
        case LD_UINT32:
            tgen_ext32u(s, arg1, data_reg);
            break;
        case LD_UINT64:
            tcg_out_mov(s, TCG_TYPE_I64, arg1, data_reg);
            break;
        default:
            tcg_abort();
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, mem_index);
        /* XXX/FIXME: suboptimal */
        /* Shift the (addr, value, mem_index) args up one slot to make
           room for the env pointer in the first argument register.  */
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
                    tcg_target_call_iarg_regs[2]);
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
                    tcg_target_call_iarg_regs[1]);
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
                    tcg_target_call_iarg_regs[0]);
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
                    TCG_AREG0);
        tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
        /* XXX/FIXME: suboptimal */
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
                    tcg_target_call_iarg_regs[1]);
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
                    tcg_target_call_iarg_regs[0]);
        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
                    TCG_AREG0);
        tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);

        /* sign extension */
        /* The helper returns a zero-extended value in R2 (arg0);
           widen signed loads into data_reg as required.  */
        switch (opc) {
        case LD_INT8:
            tgen_ext8s(s, TCG_TYPE_I64, data_reg, arg0);
            break;
        case LD_INT16:
            tgen_ext16s(s, TCG_TYPE_I64, data_reg, arg0);
            break;
        case LD_INT32:
            tgen_ext32s(s, data_reg, arg0);
            break;
        default:
            /* unsigned -> just copy */
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, arg0);
            break;
        }
    }

    /* jump to label2 (end) */
    *label2_ptr_p = (uint16_t*)s->code_ptr;

    tcg_out_insn(s, RI, BRC, S390_CC_ALWAYS, 0);

    /* this is label1, patch branch */
    /* The BRC displacement lives in the second halfword of the insn
       and is counted in halfwords.  */
    *(label1_ptr + 1) = ((unsigned long)s->code_ptr -
                         (unsigned long)label1_ptr) >> 1;

    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    assert(ofs < 0x80000);

    /* TLB hit: add the host-address addend, leaving the translated
       address in arg0 (R2) for the caller's direct access.  */
    tcg_out_mem(s, 0, RXY_AG, arg0, arg1, TCG_AREG0, ofs);
}
1559
/* Patch the "label2" skip branch emitted by tcg_prepare_qemu_ldst so the
   slow path jumps here, past the inline memory access.  */
static void tcg_finish_qemu_ldst(TCGContext* s, uint16_t *label2_ptr)
{
    /* patch branch */
    /* The BRC displacement is the second halfword, in halfword units.  */
    *(label2_ptr + 1) = ((unsigned long)s->code_ptr -
                         (unsigned long)label2_ptr) >> 1;
}
1566#else
1567static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1568 TCGReg *index_reg, tcg_target_long *disp)
1569{
1570 if (TARGET_LONG_BITS == 32) {
1571 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1572 *addr_reg = TCG_TMP0;
1573 }
1574 if (GUEST_BASE < 0x80000) {
1575 *index_reg = TCG_REG_NONE;
1576 *disp = GUEST_BASE;
1577 } else {
1578 *index_reg = TCG_GUEST_BASE_REG;
1579 *disp = 0;
1580 }
1581}
1582#endif /* CONFIG_SOFTMMU */
1583
1584/* load data with address translation (if applicable)
1585 and endianness conversion */
/* load data with address translation (if applicable)
   and endianness conversion */
static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
{
    TCGReg addr_reg, data_reg;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    uint16_t *label2_ptr;
#else
    TCGReg index_reg;
    tcg_target_long disp;
#endif

    data_reg = *args++;
    addr_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    mem_index = *args;

    /* TLB lookup: on a hit the translated host address is left in R2;
       on a miss the helper call loads directly into data_reg and skips
       the inline access via label2.  */
    tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
                          opc, &label2_ptr, 0);

    tcg_out_qemu_ld_direct(s, opc, data_reg, TCG_REG_R2, TCG_REG_NONE, 0);

    tcg_finish_qemu_ldst(s, label2_ptr);
#else
    /* User-mode: address guest memory directly at GUEST_BASE.  */
    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
1614
/* Store data with address translation (if applicable) and endianness
   conversion -- the store counterpart of tcg_out_qemu_ld.  */
static void tcg_out_qemu_st(TCGContext* s, const TCGArg* args, int opc)
{
    TCGReg addr_reg, data_reg;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    uint16_t *label2_ptr;
#else
    TCGReg index_reg;
    tcg_target_long disp;
#endif

    data_reg = *args++;
    addr_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    mem_index = *args;

    /* TLB lookup: on a hit the translated host address is left in R2;
       on a miss the helper performs the store and skips the inline
       access via label2.  */
    tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
                          opc, &label2_ptr, 1);

    tcg_out_qemu_st_direct(s, opc, data_reg, TCG_REG_R2, TCG_REG_NONE, 0);

    tcg_finish_qemu_ldst(s, label2_ptr);
#else
    /* User-mode: address guest memory directly at GUEST_BASE.  */
    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
1643
48bb3750
RH
1644# define OP_32_64(x) \
1645 case glue(glue(INDEX_op_,x),_i32): \
1646 case glue(glue(INDEX_op_,x),_i64)
48bb3750 1647
a9751609 1648static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2827822e
AG
1649 const TCGArg *args, const int *const_args)
1650{
48bb3750
RH
1651 S390Opcode op;
1652
1653 switch (opc) {
1654 case INDEX_op_exit_tb:
1655 /* return value */
1656 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
1657 tgen_gotoi(s, S390_CC_ALWAYS, (unsigned long)tb_ret_addr);
1658 break;
1659
1660 case INDEX_op_goto_tb:
1661 if (s->tb_jmp_offset) {
1662 tcg_abort();
1663 } else {
1664 /* load address stored at s->tb_next + args[0] */
1665 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
1666 /* and go there */
1667 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1668 }
1669 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1670 break;
1671
1672 case INDEX_op_call:
1673 if (const_args[0]) {
1674 tgen_calli(s, args[0]);
1675 } else {
1676 tcg_out_insn(s, RR, BASR, TCG_REG_R14, args[0]);
1677 }
1678 break;
1679
1680 case INDEX_op_mov_i32:
1681 tcg_out_mov(s, TCG_TYPE_I32, args[0], args[1]);
1682 break;
1683 case INDEX_op_movi_i32:
1684 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1685 break;
1686
1687 OP_32_64(ld8u):
1688 /* ??? LLC (RXY format) is only present with the extended-immediate
1689 facility, whereas LLGC is always present. */
1690 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1691 break;
1692
1693 OP_32_64(ld8s):
1694 /* ??? LB is no smaller than LGB, so no point to using it. */
1695 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1696 break;
1697
1698 OP_32_64(ld16u):
1699 /* ??? LLH (RXY format) is only present with the extended-immediate
1700 facility, whereas LLGH is always present. */
1701 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1702 break;
1703
1704 case INDEX_op_ld16s_i32:
1705 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1706 break;
1707
1708 case INDEX_op_ld_i32:
1709 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1710 break;
1711
1712 OP_32_64(st8):
1713 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1714 TCG_REG_NONE, args[2]);
1715 break;
1716
1717 OP_32_64(st16):
1718 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1719 TCG_REG_NONE, args[2]);
1720 break;
1721
1722 case INDEX_op_st_i32:
1723 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1724 break;
1725
1726 case INDEX_op_add_i32:
1727 if (const_args[2]) {
1728 tgen32_addi(s, args[0], args[2]);
1729 } else {
1730 tcg_out_insn(s, RR, AR, args[0], args[2]);
1731 }
1732 break;
1733 case INDEX_op_sub_i32:
1734 if (const_args[2]) {
1735 tgen32_addi(s, args[0], -args[2]);
1736 } else {
1737 tcg_out_insn(s, RR, SR, args[0], args[2]);
1738 }
1739 break;
1740
1741 case INDEX_op_and_i32:
1742 if (const_args[2]) {
07ff7983 1743 tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
48bb3750
RH
1744 } else {
1745 tcg_out_insn(s, RR, NR, args[0], args[2]);
1746 }
1747 break;
1748 case INDEX_op_or_i32:
1749 if (const_args[2]) {
1750 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1751 } else {
1752 tcg_out_insn(s, RR, OR, args[0], args[2]);
1753 }
1754 break;
1755 case INDEX_op_xor_i32:
1756 if (const_args[2]) {
1757 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1758 } else {
1759 tcg_out_insn(s, RR, XR, args[0], args[2]);
1760 }
1761 break;
1762
1763 case INDEX_op_neg_i32:
1764 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1765 break;
1766
1767 case INDEX_op_mul_i32:
1768 if (const_args[2]) {
1769 if ((int32_t)args[2] == (int16_t)args[2]) {
1770 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1771 } else {
1772 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1773 }
1774 } else {
1775 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1776 }
1777 break;
1778
1779 case INDEX_op_div2_i32:
1780 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1781 break;
1782 case INDEX_op_divu2_i32:
1783 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1784 break;
1785
1786 case INDEX_op_shl_i32:
1787 op = RS_SLL;
1788 do_shift32:
1789 if (const_args[2]) {
1790 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1791 } else {
1792 tcg_out_sh32(s, op, args[0], args[2], 0);
1793 }
1794 break;
1795 case INDEX_op_shr_i32:
1796 op = RS_SRL;
1797 goto do_shift32;
1798 case INDEX_op_sar_i32:
1799 op = RS_SRA;
1800 goto do_shift32;
1801
1802 case INDEX_op_rotl_i32:
1803 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1804 if (const_args[2]) {
1805 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1806 } else {
1807 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1808 }
1809 break;
1810 case INDEX_op_rotr_i32:
1811 if (const_args[2]) {
1812 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1813 TCG_REG_NONE, (32 - args[2]) & 31);
1814 } else {
1815 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1816 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1817 }
1818 break;
1819
1820 case INDEX_op_ext8s_i32:
1821 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1822 break;
1823 case INDEX_op_ext16s_i32:
1824 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1825 break;
1826 case INDEX_op_ext8u_i32:
1827 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1828 break;
1829 case INDEX_op_ext16u_i32:
1830 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1831 break;
1832
1833 OP_32_64(bswap16):
1834 /* The TCG bswap definition requires bits 0-47 already be zero.
1835 Thus we don't need the G-type insns to implement bswap16_i64. */
1836 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1837 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1838 break;
1839 OP_32_64(bswap32):
1840 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1841 break;
1842
3790b918
RH
1843 case INDEX_op_add2_i32:
1844 /* ??? Make use of ALFI. */
1845 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1846 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1847 break;
1848 case INDEX_op_sub2_i32:
1849 /* ??? Make use of SLFI. */
1850 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1851 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1852 break;
1853
48bb3750
RH
1854 case INDEX_op_br:
1855 tgen_branch(s, S390_CC_ALWAYS, args[0]);
1856 break;
1857
1858 case INDEX_op_brcond_i32:
1859 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
1860 args[1], const_args[1], args[3]);
1861 break;
1862 case INDEX_op_setcond_i32:
1863 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1864 args[2], const_args[2]);
1865 break;
96a9f093
RH
1866 case INDEX_op_movcond_i32:
1867 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
1868 args[2], const_args[2], args[3]);
1869 break;
48bb3750
RH
1870
1871 case INDEX_op_qemu_ld8u:
1872 tcg_out_qemu_ld(s, args, LD_UINT8);
1873 break;
1874 case INDEX_op_qemu_ld8s:
1875 tcg_out_qemu_ld(s, args, LD_INT8);
1876 break;
1877 case INDEX_op_qemu_ld16u:
1878 tcg_out_qemu_ld(s, args, LD_UINT16);
1879 break;
1880 case INDEX_op_qemu_ld16s:
1881 tcg_out_qemu_ld(s, args, LD_INT16);
1882 break;
1883 case INDEX_op_qemu_ld32:
1884 /* ??? Technically we can use a non-extending instruction. */
1885 tcg_out_qemu_ld(s, args, LD_UINT32);
1886 break;
1887 case INDEX_op_qemu_ld64:
1888 tcg_out_qemu_ld(s, args, LD_UINT64);
1889 break;
1890
1891 case INDEX_op_qemu_st8:
1892 tcg_out_qemu_st(s, args, LD_UINT8);
1893 break;
1894 case INDEX_op_qemu_st16:
1895 tcg_out_qemu_st(s, args, LD_UINT16);
1896 break;
1897 case INDEX_op_qemu_st32:
1898 tcg_out_qemu_st(s, args, LD_UINT32);
1899 break;
1900 case INDEX_op_qemu_st64:
1901 tcg_out_qemu_st(s, args, LD_UINT64);
1902 break;
1903
48bb3750
RH
1904 case INDEX_op_mov_i64:
1905 tcg_out_mov(s, TCG_TYPE_I64, args[0], args[1]);
1906 break;
1907 case INDEX_op_movi_i64:
1908 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1909 break;
1910
1911 case INDEX_op_ld16s_i64:
1912 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1913 break;
1914 case INDEX_op_ld32u_i64:
1915 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1916 break;
1917 case INDEX_op_ld32s_i64:
1918 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1919 break;
1920 case INDEX_op_ld_i64:
1921 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1922 break;
1923
1924 case INDEX_op_st32_i64:
1925 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1926 break;
1927 case INDEX_op_st_i64:
1928 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1929 break;
1930
1931 case INDEX_op_add_i64:
1932 if (const_args[2]) {
1933 tgen64_addi(s, args[0], args[2]);
1934 } else {
1935 tcg_out_insn(s, RRE, AGR, args[0], args[2]);
1936 }
1937 break;
1938 case INDEX_op_sub_i64:
1939 if (const_args[2]) {
1940 tgen64_addi(s, args[0], -args[2]);
1941 } else {
1942 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
1943 }
1944 break;
1945
1946 case INDEX_op_and_i64:
1947 if (const_args[2]) {
07ff7983 1948 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
48bb3750
RH
1949 } else {
1950 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
1951 }
1952 break;
1953 case INDEX_op_or_i64:
1954 if (const_args[2]) {
1955 tgen64_ori(s, args[0], args[2]);
1956 } else {
1957 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
1958 }
1959 break;
1960 case INDEX_op_xor_i64:
1961 if (const_args[2]) {
1962 tgen64_xori(s, args[0], args[2]);
1963 } else {
1964 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
1965 }
1966 break;
1967
1968 case INDEX_op_neg_i64:
1969 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
1970 break;
1971 case INDEX_op_bswap64_i64:
1972 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
1973 break;
1974
1975 case INDEX_op_mul_i64:
1976 if (const_args[2]) {
1977 if (args[2] == (int16_t)args[2]) {
1978 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
1979 } else {
1980 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
1981 }
1982 } else {
1983 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
1984 }
1985 break;
1986
1987 case INDEX_op_div2_i64:
1988 /* ??? We get an unnecessary sign-extension of the dividend
1989 into R3 with this definition, but as we do in fact always
1990 produce both quotient and remainder using INDEX_op_div_i64
1991 instead requires jumping through even more hoops. */
1992 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
1993 break;
1994 case INDEX_op_divu2_i64:
1995 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
1996 break;
36017dc6
RH
1997 case INDEX_op_mulu2_i64:
1998 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
1999 break;
48bb3750
RH
2000
2001 case INDEX_op_shl_i64:
2002 op = RSY_SLLG;
2003 do_shift64:
2004 if (const_args[2]) {
2005 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2006 } else {
2007 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2008 }
2009 break;
2010 case INDEX_op_shr_i64:
2011 op = RSY_SRLG;
2012 goto do_shift64;
2013 case INDEX_op_sar_i64:
2014 op = RSY_SRAG;
2015 goto do_shift64;
2016
2017 case INDEX_op_rotl_i64:
2018 if (const_args[2]) {
2019 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2020 TCG_REG_NONE, args[2]);
2021 } else {
2022 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2023 }
2024 break;
2025 case INDEX_op_rotr_i64:
2026 if (const_args[2]) {
2027 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2028 TCG_REG_NONE, (64 - args[2]) & 63);
2029 } else {
2030 /* We can use the smaller 32-bit negate because only the
2031 low 6 bits are examined for the rotate. */
2032 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2033 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2034 }
2035 break;
2036
2037 case INDEX_op_ext8s_i64:
2038 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2039 break;
2040 case INDEX_op_ext16s_i64:
2041 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2042 break;
2043 case INDEX_op_ext32s_i64:
2044 tgen_ext32s(s, args[0], args[1]);
2045 break;
2046 case INDEX_op_ext8u_i64:
2047 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2048 break;
2049 case INDEX_op_ext16u_i64:
2050 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2051 break;
2052 case INDEX_op_ext32u_i64:
2053 tgen_ext32u(s, args[0], args[1]);
2054 break;
2055
3790b918
RH
2056 case INDEX_op_add2_i64:
2057 /* ??? Make use of ALGFI and SLGFI. */
2058 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2059 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2060 break;
2061 case INDEX_op_sub2_i64:
2062 /* ??? Make use of ALGFI and SLGFI. */
2063 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2064 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2065 break;
2066
48bb3750
RH
2067 case INDEX_op_brcond_i64:
2068 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2069 args[1], const_args[1], args[3]);
2070 break;
2071 case INDEX_op_setcond_i64:
2072 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2073 args[2], const_args[2]);
2074 break;
96a9f093
RH
2075 case INDEX_op_movcond_i64:
2076 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2077 args[2], const_args[2], args[3]);
2078 break;
48bb3750
RH
2079
2080 case INDEX_op_qemu_ld32u:
2081 tcg_out_qemu_ld(s, args, LD_UINT32);
2082 break;
2083 case INDEX_op_qemu_ld32s:
2084 tcg_out_qemu_ld(s, args, LD_INT32);
2085 break;
48bb3750 2086
d5690ea4
RH
2087 OP_32_64(deposit):
2088 tgen_deposit(s, args[0], args[2], args[3], args[4]);
2089 break;
2090
48bb3750
RH
2091 default:
2092 fprintf(stderr,"unimplemented opc 0x%x\n",opc);
2093 tcg_abort();
2094 }
2827822e
AG
2095}
2096
/* Operand-constraint table for every TCG opcode this backend accepts.
   Each entry pairs an opcode with one constraint string per operand,
   outputs first.  "r" is any general register; "0"/"1" force an input
   into the same register as output 0/1; "L" is the restricted register
   set used around qemu_ld/st slow paths.  The remaining letters
   (a, b, i, I, K, N, O, R, W, X, C) are decoded by this file's
   constraint parser (not visible in this chunk) -- NOTE(review):
   confirm their exact meanings (register pairs / immediate ranges /
   "force 32-bit" modifiers) against target_parse_constraint.  */
static const TCGTargetOpDef s390_op_defs[] = {
    /* Control flow and calls.  */
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    /* 32-bit moves.  */
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    /* 32-bit loads and stores (value, base).  */
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* 32-bit arithmetic; two-address forms tie input to output.  */
    { INDEX_op_add_i32, { "r", "0", "rWI" } },
    { INDEX_op_sub_i32, { "r", "0", "rWNI" } },
    { INDEX_op_mul_i32, { "r", "0", "rK" } },

    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },

    /* 32-bit logicals.  "ri" on AND permits any immediate (risbgz).  */
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "rWO" } },
    { INDEX_op_xor_i32, { "r", "0", "rWX" } },

    { INDEX_op_neg_i32, { "r", "r" } },

    /* 32-bit shifts and rotates.  */
    { INDEX_op_shl_i32, { "r", "0", "Ri" } },
    { INDEX_op_shr_i32, { "r", "0", "Ri" } },
    { INDEX_op_sar_i32, { "r", "0", "Ri" } },

    { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "Ri" } },

    /* 32-bit sign/zero extensions and byte swaps.  */
    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    /* Double-word carry arithmetic.  */
    { INDEX_op_add2_i32, { "r", "r", "0", "1", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "r", "r" } },

    /* 32-bit compares and conditional operations.  */
    { INDEX_op_brcond_i32, { "r", "rWC" } },
    { INDEX_op_setcond_i32, { "r", "r", "rWC" } },
    { INDEX_op_movcond_i32, { "r", "r", "rWC", "r", "0" } },
    { INDEX_op_deposit_i32, { "r", "0", "r" } },

    /* Guest memory accesses.  */
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },

    /* 64-bit moves.  */
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },

    /* 64-bit loads and stores.  */
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    /* 64-bit arithmetic.  */
    { INDEX_op_add_i64, { "r", "0", "rI" } },
    { INDEX_op_sub_i64, { "r", "0", "rNI" } },
    { INDEX_op_mul_i64, { "r", "0", "rK" } },

    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },

    /* 64-bit logicals.  */
    { INDEX_op_and_i64, { "r", "0", "ri" } },
    { INDEX_op_or_i64, { "r", "0", "rO" } },
    { INDEX_op_xor_i64, { "r", "0", "rX" } },

    { INDEX_op_neg_i64, { "r", "r" } },

    /* 64-bit shifts and rotates.  */
    { INDEX_op_shl_i64, { "r", "r", "Ri" } },
    { INDEX_op_shr_i64, { "r", "r", "Ri" } },
    { INDEX_op_sar_i64, { "r", "r", "Ri" } },

    { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "Ri" } },

    /* 64-bit sign/zero extensions and byte swaps.  */
    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_add2_i64, { "r", "r", "0", "1", "r", "r" } },
    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "r", "r" } },

    /* 64-bit compares and conditional operations.  */
    { INDEX_op_brcond_i64, { "r", "rC" } },
    { INDEX_op_setcond_i64, { "r", "r", "rC" } },
    { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i64, { "r", "0", "r" } },

    /* 32-bit guest loads with explicit extension (64-bit host only).  */
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },

    /* Table terminator.  */
    { -1 },
};
2224
2225/* ??? Linux kernels provide an AUXV entry AT_HWCAP that provides most of
2226 this information. However, getting at that entry is not easy this far
2227 away from main. Our options are: start searching from environ, but
2228 that fails as soon as someone does a setenv in between. Read the data
2229 from /proc/self/auxv. Or do the probing ourselves. The only thing
2230 extra that AT_HWCAP gives us is HWCAP_S390_HIGH_GPRS, which indicates
2231 that the kernel saves all 64-bits of the registers around traps while
2232 in 31-bit mode. But this is true of all "recent" kernels (ought to dig
2233 back and see from when this might not be true). */
2234
#include <signal.h>

/* Set to 1 by sigill_handler when a probed instruction traps.
   sig_atomic_t/volatile because it is written from a signal handler
   and read in the interrupted context (query_facilities).  */
static volatile sig_atomic_t got_sigill;

/* SIGILL handler installed around the instruction probes in
   query_facilities: records that the probe was illegal on this CPU.  */
static void sigill_handler(int sig)
{
    got_sigill = 1;
}
2243
/* Probe the host CPU for the instruction-set facilities the code
   generator relies on, accumulating FACILITY_* bits into the global
   'facilities' (declared elsewhere in this file).  The probe executes
   candidate instructions under a temporary SIGILL handler: if an
   instruction traps, sigill_handler sets got_sigill and the
   corresponding facility bit is left clear.  Exits the process when a
   hard-required facility is missing.  */
static void query_facilities(void)
{
    struct sigaction sa_old, sa_new;
    /* The hand-encoded probe instructions below hard-code GPRs 0 and 1,
       so bind the asm operands to exactly those registers.  */
    register int r0 __asm__("0");
    register void *r1 __asm__("1");
    int fail;

    /* Install our SIGILL handler for the duration of the probes,
       remembering the previous disposition in sa_old.  */
    memset(&sa_new, 0, sizeof(sa_new));
    sa_new.sa_handler = sigill_handler;
    sigaction(SIGILL, &sa_new, &sa_old);

    /* First, try STORE FACILITY LIST EXTENDED.  If this is present, then
       we need not do any more probing.  Unfortunately, this itself is an
       extension and the original STORE FACILITY LIST instruction is
       kernel-only, storing its results at absolute address 200.  */
    /* stfle 0(%r1) */
    r1 = &facilities;
    asm volatile(".word 0xb2b0,0x1000"
                 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");

    if (got_sigill) {
        /* STORE FACILITY EXTENDED is not available.  Probe for one of each
           kind of instruction that we're interested in.  */
        /* ??? Possibly some of these are in practice never present unless
           the store-facility-extended facility is also present.  But since
           that isn't documented it's just better to probe for each.  */

        /* Test for z/Architecture.  Required even in 31-bit mode.  */
        got_sigill = 0;
        /* agr %r0,%r0 */
        asm volatile(".word 0xb908,0x0000" : "=r"(r0) : : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_ZARCH_ACTIVE;
        }

        /* Test for long displacement.  */
        got_sigill = 0;
        /* ly %r0,0(%r1) */
        r1 = &facilities;
        asm volatile(".word 0xe300,0x1000,0x0058"
                     : "=r"(r0) : "r"(r1) : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_LONG_DISP;
        }

        /* Test for extended immediates.  */
        got_sigill = 0;
        /* afi %r0,0 */
        asm volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_EXT_IMM;
        }

        /* Test for general-instructions-extension.  */
        got_sigill = 0;
        /* msfi %r0,1 */
        asm volatile(".word 0xc201,0x0000,0x0001");
        if (!got_sigill) {
            facilities |= FACILITY_GEN_INST_EXT;
        }
    }

    /* Restore the caller's SIGILL disposition.  */
    sigaction(SIGILL, &sa_old, NULL);

    /* The translator currently uses these extensions unconditionally.
       Pruning this back to the base ESA/390 architecture doesn't seem
       worthwhile, since even the KVM target requires z/Arch.  */
    fail = 0;
    if ((facilities & FACILITY_ZARCH_ACTIVE) == 0) {
        fprintf(stderr, "TCG: z/Arch facility is required.\n");
        fprintf(stderr, "TCG: Boot with a 64-bit enabled kernel.\n");
        fail = 1;
    }
    if ((facilities & FACILITY_LONG_DISP) == 0) {
        fprintf(stderr, "TCG: long-displacement facility is required.\n");
        fail = 1;
    }

    /* So far there's just enough support for 31-bit mode to let the
       compile succeed.  This is good enough to run QEMU with KVM.  */
    if (sizeof(void *) != 8) {
        fprintf(stderr, "TCG: 31-bit mode is not supported.\n");
        fail = 1;
    }

    if (fail) {
        exit(-1);
    }
}
2333
2334static void tcg_target_init(TCGContext *s)
2827822e 2335{
48bb3750
RH
2336#if !defined(CONFIG_USER_ONLY)
2337 /* fail safe */
2338 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) {
2339 tcg_abort();
2340 }
2341#endif
2342
2343 query_facilities();
2344
2345 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2346 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2347
2348 tcg_regset_clear(tcg_target_call_clobber_regs);
2349 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2350 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2351 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2352 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2353 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2354 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
2355 /* The return register can be considered call-clobbered. */
2356 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2357
2358 tcg_regset_clear(s->reserved_regs);
2359 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2360 /* XXX many insns can't be used with R0, so we better avoid it for now */
2361 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2362 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2363
2364 tcg_add_target_add_op_defs(s390_op_defs);
2827822e
AG
2365}
2366
/* Emit the prologue/epilogue code shared by all translation blocks.
   The prologue saves the callee-saved registers, allocates our stack
   frame, optionally loads the guest base pointer, copies the env
   pointer into TCG_AREG0, and jumps to the TB whose address arrives
   in the second C argument register.  Everything after tb_ret_addr is
   the common epilogue that restores registers and returns to the
   prologue's caller.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    tcg_target_long frame_size;

    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    /* Frame = ABI register-save/bias area + outgoing call-arg area
       + the TCG temporary buffer.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE;
    frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -frame_size);

    /* Tell TCG where its spill/temp buffer lives within the frame.  */
    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* NOTE(review): 0x80000 looks like the signed 20-bit displacement
       limit -- presumably smaller bases are folded into memory operands
       directly, while larger ones get a dedicated reserved register.
       Confirm against the qemu_ld/st emission code.  */
    if (GUEST_BASE >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    /* First C argument is the CPU env pointer; keep it in TCG_AREG0.  */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    /* Record the epilogue entry point -- presumably the target that
       generated exit_tb code branches back to.  */
    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 frame_size + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}