]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/s390/tcg-target.c
tcg-aarch64: Define TCG_TARGET_INSN_UNIT_SIZE
[mirror_qemu.git] / tcg / s390 / tcg-target.c
CommitLineData
2827822e
AG
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
48bb3750
RH
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
2827822e
AG
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
3cf246f0
RH
27#include "tcg-be-null.h"
28
a01fc30d
RH
29/* We only support generating code for 64-bit mode. */
30#if TCG_TARGET_REG_BITS != 64
31#error "unsupported code generation mode"
32#endif
33
c9baa30f
RH
34#include "elf.h"
35
48bb3750
RH
/* ??? The translation blocks produced by TCG are generally small enough to
   be entirely reachable with a 16-bit displacement.  Leaving the option for
   a 32-bit displacement here Just In Case. */
#define USE_LONG_BRANCHES 0

/* Extra constant-operand constraint bits, tested in
   tcg_target_const_match; these are mutually exclusive. */
#define TCG_CT_CONST_MULI  0x100
#define TCG_CT_CONST_ORI   0x200
#define TCG_CT_CONST_XORI  0x400
#define TCG_CT_CONST_CMPI  0x800

/* Several places within the instruction set 0 means "no register"
   rather than TCG_REG_R0. */
#define TCG_REG_NONE    0

/* A scratch register that may be used throughout the backend. */
#define TCG_TMP0        TCG_REG_R14

/* Register holding the guest base address when emulating a user-mode
   guest; falls back to R0 (i.e. unused) otherwise. */
#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R13
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

#ifndef GUEST_BASE
#define GUEST_BASE 0
#endif
63
/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B. */
typedef enum S390Opcode {
    /* RIL: register + 32-bit immediate. */
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    /* RI: register + 16-bit immediate. */
    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,

    /* RIE: compare-and-branch and rotate/insert forms. */
    RIE_CGIJ    = 0xec7c,
    RIE_CGRJ    = 0xec64,
    RIE_CIJ     = 0xec7e,
    RIE_CLGRJ   = 0xec65,
    RIE_CLIJ    = 0xec7f,
    RIE_CLGIJ   = 0xec7d,
    RIE_CLRJ    = 0xec77,
    RIE_CRJ     = 0xec76,
    RIE_RISBG   = 0xec55,

    /* RRE: extended register-register. */
    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    /* RRF: register-register with extra mask/register field. */
    RRF_LOCR    = 0xb9f2,
    RRF_LOCGR   = 0xb9e2,

    /* RR: classic 2-byte register-register. */
    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    /* RSY: register-storage with 20-bit displacement. */
    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SRAG    = 0xeb0a,
    RSY_SRLG    = 0xeb0c,

    /* RS: register-storage with 12-bit displacement. */
    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    /* RXY: register-index-storage with 20-bit displacement. */
    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,

    /* RX: register-index-storage with 12-bit displacement. */
    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,
} S390Opcode;
229
/* Memory-access size codes: the low two bits encode log2 of the access
   size; bit 2 marks a sign-extending load. */
#define LD_SIGNED      0x04
#define LD_UINT8       0x00
#define LD_INT8        (LD_UINT8 | LD_SIGNED)
#define LD_UINT16      0x01
#define LD_INT16       (LD_UINT16 | LD_SIGNED)
#define LD_UINT32      0x02
#define LD_INT32       (LD_UINT32 | LD_SIGNED)
#define LD_UINT64      0x03
#define LD_INT64       (LD_UINT64 | LD_SIGNED)
239
240#ifndef NDEBUG
241static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
242 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
243 "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
244};
245#endif
246
/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments. */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};

/* Registers used to pass integer call arguments, in order. */
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

/* Register holding an integer call result. */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};
279
/* Condition-code mask bits as used in the M1 field of BRC/BCR:
   each bit selects one of the four CC values 0..3. */
#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OV      1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15

/* Condition codes that result from a COMPARE and COMPARE LOGICAL.
   The same masks serve both signed and unsigned comparisons because
   the comparison insn itself (CR vs CLR etc.) carries the signedness. */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};

/* Condition codes that result from a LOAD AND TEST.  Here, we have no
   unsigned instruction variation, however since the test is vs zero we
   can re-map the outcomes appropriately: e.g. "unsigned < 0" is never
   true, and "unsigned >= 0" is always true. */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};
319
#ifdef CONFIG_SOFTMMU
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
/* Indexed by access-size code (presumably LD_UINT8..LD_UINT64 — the
   table order matches those values; confirm against the caller). */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
#endif
48bb3750
RH
339
/* NOTE(review): assigned elsewhere in this file (not visible in this
   chunk); the address a translation block branches to when returning. */
static uint8_t *tb_ret_addr;

/* A list of relevant facilities used by this translator.  Some of these
   are required for proper operation, and these are checked at startup.
   Bit numbers follow the big-endian numbering of the STFLE facility
   list, hence the (63 - n) conversion to a host bit mask. */

#define FACILITY_ZARCH_ACTIVE	(1ULL << (63 - 2))
#define FACILITY_LONG_DISP	(1ULL << (63 - 18))
#define FACILITY_EXT_IMM	(1ULL << (63 - 21))
#define FACILITY_GEN_INST_EXT	(1ULL << (63 - 34))
#define FACILITY_LOAD_ON_COND   (1ULL << (63 - 45))

/* Facility bits detected at startup; tested throughout the backend. */
static uint64_t facilities;
2827822e
AG
352
/* Patch a previously emitted PC-relative field at CODE_PTR so that it
   refers to VALUE.  ??? ADDEND here is folded into the PC-relative base
   rather than added to the value, which is not the usual definition. */
static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t pcrel2 = (value - ((intptr_t)code_ptr + addend)) >> 1;

    switch (type) {
    case R_390_PC16DBL:
        /* 16-bit halfword-scaled displacement. */
        assert(pcrel2 == (int16_t)pcrel2);
        *(int16_t *)code_ptr = pcrel2;
        break;
    case R_390_PC32DBL:
        /* 32-bit halfword-scaled displacement. */
        assert(pcrel2 == (int32_t)pcrel2);
        *(int32_t *)code_ptr = pcrel2;
        break;
    default:
        tcg_abort();
        break;
    }
}
376
2827822e
AG
377/* parse target specific constraints */
378static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
379{
48bb3750
RH
380 const char *ct_str = *pct_str;
381
382 switch (ct_str[0]) {
383 case 'r': /* all registers */
384 ct->ct |= TCG_CT_REG;
385 tcg_regset_set32(ct->u.regs, 0, 0xffff);
386 break;
387 case 'R': /* not R0 */
388 ct->ct |= TCG_CT_REG;
389 tcg_regset_set32(ct->u.regs, 0, 0xffff);
390 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
391 break;
392 case 'L': /* qemu_ld/st constraint */
393 ct->ct |= TCG_CT_REG;
394 tcg_regset_set32(ct->u.regs, 0, 0xffff);
395 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
396 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
65a62a75 397 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
48bb3750
RH
398 break;
399 case 'a': /* force R2 for division */
400 ct->ct |= TCG_CT_REG;
401 tcg_regset_clear(ct->u.regs);
402 tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
403 break;
404 case 'b': /* force R3 for division */
405 ct->ct |= TCG_CT_REG;
406 tcg_regset_clear(ct->u.regs);
407 tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
408 break;
48bb3750
RH
409 case 'K':
410 ct->ct |= TCG_CT_CONST_MULI;
411 break;
48bb3750
RH
412 case 'O':
413 ct->ct |= TCG_CT_CONST_ORI;
414 break;
415 case 'X':
416 ct->ct |= TCG_CT_CONST_XORI;
417 break;
418 case 'C':
419 ct->ct |= TCG_CT_CONST_CMPI;
420 break;
421 default:
422 return -1;
423 }
424 ct_str++;
425 *pct_str = ct_str;
426
2827822e
AG
427 return 0;
428}
429
48bb3750
RH
430/* Immediates to be used with logical OR. This is an optimization only,
431 since a full 64-bit immediate OR can always be performed with 4 sequential
432 OI[LH][LH] instructions. What we're looking for is immediates that we
433 can load efficiently, and the immediate load plus the reg-reg OR is
434 smaller than the sequential OI's. */
435
671c835b 436static int tcg_match_ori(TCGType type, tcg_target_long val)
48bb3750
RH
437{
438 if (facilities & FACILITY_EXT_IMM) {
671c835b 439 if (type == TCG_TYPE_I32) {
48bb3750
RH
440 /* All 32-bit ORs can be performed with 1 48-bit insn. */
441 return 1;
442 }
443 }
444
445 /* Look for negative values. These are best to load with LGHI. */
446 if (val < 0) {
447 if (val == (int16_t)val) {
448 return 0;
449 }
450 if (facilities & FACILITY_EXT_IMM) {
451 if (val == (int32_t)val) {
452 return 0;
453 }
454 }
455 }
456
457 return 1;
458}
459
460/* Immediates to be used with logical XOR. This is almost, but not quite,
461 only an optimization. XOR with immediate is only supported with the
462 extended-immediate facility. That said, there are a few patterns for
463 which it is better to load the value into a register first. */
464
671c835b 465static int tcg_match_xori(TCGType type, tcg_target_long val)
48bb3750
RH
466{
467 if ((facilities & FACILITY_EXT_IMM) == 0) {
468 return 0;
469 }
470
671c835b 471 if (type == TCG_TYPE_I32) {
48bb3750
RH
472 /* All 32-bit XORs can be performed with 1 48-bit insn. */
473 return 1;
474 }
475
476 /* Look for negative values. These are best to load with LGHI. */
477 if (val < 0 && val == (int32_t)val) {
478 return 0;
479 }
480
481 return 1;
482}
483
484/* Imediates to be used with comparisons. */
485
671c835b 486static int tcg_match_cmpi(TCGType type, tcg_target_long val)
48bb3750
RH
487{
488 if (facilities & FACILITY_EXT_IMM) {
489 /* The COMPARE IMMEDIATE instruction is available. */
671c835b 490 if (type == TCG_TYPE_I32) {
48bb3750
RH
491 /* We have a 32-bit immediate and can compare against anything. */
492 return 1;
493 } else {
494 /* ??? We have no insight here into whether the comparison is
495 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
496 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
497 a 32-bit unsigned immediate. If we were to use the (semi)
498 obvious "val == (int32_t)val" we would be enabling unsigned
499 comparisons vs very large numbers. The only solution is to
500 take the intersection of the ranges. */
501 /* ??? Another possible solution is to simply lie and allow all
502 constants here and force the out-of-range values into a temp
503 register in tgen_cmp when we have knowledge of the actual
504 comparison code in use. */
505 return val >= 0 && val <= 0x7fffffff;
506 }
507 } else {
508 /* Only the LOAD AND TEST instruction is available. */
509 return val == 0;
510 }
511}
512
2827822e 513/* Test if a constant matches the constraint. */
f6c6afc1 514static int tcg_target_const_match(tcg_target_long val, TCGType type,
48bb3750 515 const TCGArgConstraint *arg_ct)
2827822e 516{
48bb3750
RH
517 int ct = arg_ct->ct;
518
519 if (ct & TCG_CT_CONST) {
520 return 1;
521 }
522
671c835b 523 if (type == TCG_TYPE_I32) {
48bb3750
RH
524 val = (int32_t)val;
525 }
526
527 /* The following are mutually exclusive. */
0db921e6 528 if (ct & TCG_CT_CONST_MULI) {
48bb3750
RH
529 /* Immediates that may be used with multiply. If we have the
530 general-instruction-extensions, then we have MULTIPLY SINGLE
531 IMMEDIATE with a signed 32-bit, otherwise we have only
532 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
533 if (facilities & FACILITY_GEN_INST_EXT) {
534 return val == (int32_t)val;
535 } else {
536 return val == (int16_t)val;
537 }
48bb3750 538 } else if (ct & TCG_CT_CONST_ORI) {
671c835b 539 return tcg_match_ori(type, val);
48bb3750 540 } else if (ct & TCG_CT_CONST_XORI) {
671c835b 541 return tcg_match_xori(type, val);
48bb3750 542 } else if (ct & TCG_CT_CONST_CMPI) {
671c835b 543 return tcg_match_cmpi(type, val);
48bb3750
RH
544 }
545
2827822e
AG
546 return 0;
547}
548
48bb3750
RH
/* Emit instructions according to the given instruction format. */

/* RR: 8-bit opcode plus two 4-bit registers; 2 bytes. */
static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

/* RRE: 16-bit opcode, registers in the low byte; 4 bytes. */
static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

/* RRF: like RRE but with an additional 4-bit mask field M3. */
static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

/* RI: register plus 16-bit immediate; 4 bytes. */
static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

/* RIL: register plus 32-bit immediate; 6 bytes, emitted as 16 + 32. */
static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

/* RS: two registers plus base + 12-bit displacement; 4 bytes. */
static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

/* RSY: RS with a 20-bit signed displacement split across the insn;
   the two opcode bytes sit at opposite ends (see S390Opcode comment). */
static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

/* The RX/RXY layouts match RS/RSY with the index register in the R3 slot. */
#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

/* Emit an opcode with "type-checking" of the format. */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
600
601
/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    /* RSY slots: r1 = dest, b2 = shift-count register, r3 = src. */
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    /* 32-bit RS shifts modify DEST in place; there is no separate source. */
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}
615
616static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
617{
618 if (src != dst) {
619 if (type == TCG_TYPE_I32) {
620 tcg_out_insn(s, RR, LR, dst, src);
621 } else {
622 tcg_out_insn(s, RRE, LGR, dst, src);
623 }
624 }
625}
626
/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    static const S390Opcode lli_insns[4] = {
        RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
    };

    tcg_target_ulong uval = sval;
    int i;

    /* For 32-bit requests, only the low 32 bits matter; keep a
       zero-extended and a sign-extended view of them. */
    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go. */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    /* A value with exactly one nonzero 16-bit chunk can use LLI[LH][LH]. */
    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can load it in one go. */
    if (facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            /* ">> 31 >> 1" rather than ">> 32" — presumably to stay
               defined if tcg_target_ulong were 32 bits; here it is
               equivalent to a 32-bit shift. */
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
            return;
        }
    }

    /* Try for PC-relative address load: even values whose halfword
       distance from the current code pointer fits LARL's 32 bits. */
    if ((sval & 1) == 0) {
        intptr_t off = (sval - (intptr_t)s->code_ptr) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    }

    /* If extended immediates are not present, then we may have to issue
       several instructions to load the low 32 bits. */
    if (!(facilities & FACILITY_EXT_IMM)) {
        /* A 32-bit unsigned value can be loaded in 2 insns.  And given
           that the lli_insns loop above did not succeed, we know that
           both insns are required. */
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RI, LLILL, ret, uval);
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }

        /* If all high bits are set, the value can be loaded in 2 or 3 insns.
           We first want to make sure that all the high bits get set.  With
           luck the low 16-bits can be considered negative to perform that for
           free, otherwise we load an explicit -1. */
        if (sval >> 31 >> 1 == -1) {
            if (uval & 0x8000) {
                tcg_out_insn(s, RI, LGHI, ret, uval);
            } else {
                tcg_out_insn(s, RI, LGHI, ret, -1);
                tcg_out_insn(s, RI, IILL, ret, uval);
            }
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }
    }

    /* If we get here, both the high and low parts have non-zero bits. */

    /* Recurse to load the lower 32-bits. */
    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);

    /* Insert data into the high 32-bits. */
    uval = uval >> 31 >> 1;
    if (facilities & FACILITY_EXT_IMM) {
        if (uval < 0x10000) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        } else if ((uval & 0xffff) == 0) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        } else {
            tcg_out_insn(s, RIL, IIHF, ret, uval);
        }
    } else {
        /* Without IIHF, insert each nonzero 16-bit chunk separately. */
        if (uval & 0xffff) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        }
        if (uval & 0xffff0000) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        }
    }
}
734
735
736/* Emit a load/store type instruction. Inputs are:
737 DATA: The register to be loaded or stored.
738 BASE+OFS: The effective address.
739 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
740 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
741
742static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
743 TCGReg data, TCGReg base, TCGReg index,
744 tcg_target_long ofs)
745{
746 if (ofs < -0x80000 || ofs >= 0x80000) {
78c9f7c5
RH
747 /* Combine the low 20 bits of the offset with the actual load insn;
748 the high 44 bits must come from an immediate load. */
749 tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
750 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
751 ofs = low;
48bb3750
RH
752
753 /* If we were already given an index register, add it in. */
754 if (index != TCG_REG_NONE) {
755 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
756 }
757 index = TCG_TMP0;
758 }
759
760 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
761 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
762 } else {
763 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
764 }
2827822e
AG
765}
766
48bb3750 767
2827822e 768/* load data without address translation or endianness conversion */
48bb3750 769static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 770 TCGReg base, intptr_t ofs)
2827822e 771{
48bb3750
RH
772 if (type == TCG_TYPE_I32) {
773 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
774 } else {
775 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
776 }
2827822e
AG
777}
778
48bb3750 779static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 780 TCGReg base, intptr_t ofs)
2827822e 781{
48bb3750
RH
782 if (type == TCG_TYPE_I32) {
783 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
784 } else {
785 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
786 }
787}
788
/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    tcg_target_long addr = (tcg_target_long)abs;

    /* With the general-instruction-extension facility, a PC-relative
       load (LRL/LGRL) reaches any even address within +/- 4GB halfwords. */
    if (facilities & FACILITY_GEN_INST_EXT) {
        tcg_target_long disp = (addr - (tcg_target_long)s->code_ptr) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }

    /* Otherwise materialize the address (rounded down to a 16-bit
       boundary) in DEST and load with the remainder as displacement. */
    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}
809
f0bffc27
RH
/* Emit RISBG (rotate then insert selected bits): bits MSB..LSB of SRC,
   rotated left by OFS, are inserted into DEST; with Z set the remaining
   bits of DEST are zeroed rather than preserved. */
static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}
818
48bb3750
RH
/* Sign-extend the low 8 bits of SRC into DEST. */
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* Single insn: LOAD BYTE (LGBR). */
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    /* Otherwise shift left then arithmetic shift right.  The 64-bit
       SLLG doubles as the copying form when DEST != SRC. */
    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}
838
839static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
840{
841 if (facilities & FACILITY_EXT_IMM) {
842 tcg_out_insn(s, RRE, LLGCR, dest, src);
843 return;
844 }
845
846 if (dest == src) {
847 tcg_out_movi(s, type, TCG_TMP0, 0xff);
848 src = TCG_TMP0;
849 } else {
850 tcg_out_movi(s, type, dest, 0xff);
851 }
852 if (type == TCG_TYPE_I32) {
853 tcg_out_insn(s, RR, NR, dest, src);
854 } else {
855 tcg_out_insn(s, RRE, NGR, dest, src);
856 }
857}
858
/* Sign-extend the low 16 bits of SRC into DEST. */
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* Single insn: LOAD HALFWORD (LGHR). */
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    /* Otherwise shift left then arithmetic shift right; SLLG doubles
       as the copying form when DEST != SRC. */
    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}
878
879static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
880{
881 if (facilities & FACILITY_EXT_IMM) {
882 tcg_out_insn(s, RRE, LLGHR, dest, src);
883 return;
884 }
885
886 if (dest == src) {
887 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
888 src = TCG_TMP0;
889 } else {
890 tcg_out_movi(s, type, dest, 0xffff);
891 }
892 if (type == TCG_TYPE_I32) {
893 tcg_out_insn(s, RR, NR, dest, src);
894 } else {
895 tcg_out_insn(s, RRE, NGR, dest, src);
896 }
897}
898
/* Sign-extend the low 32 bits of SRC into DEST (LGFR). */
static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

/* Zero-extend the low 32 bits of SRC into DEST (LLGFR). */
static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}
908
f0bffc27
RH
/* Accept bit patterns like these:
      0....01....1
      1....10....0
      1..10..01..1
      0..01..10..0
   i.e. a single contiguous run of 1's, allowing wraparound — exactly
   the masks expressible as a RISBG bit range.  Copied from gcc sources. */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t first;

    /* Normalize so that bit 0 is clear; inversion does not change the
       number of 0<->1 transitions. */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros (and, via the normalization above, all ones). */
    if (c == 0) {
        return false;
    }
    /* Isolate the lowest set bit: the first transition. */
    first = c & -c;
    /* Invert, then erase everything up to that transition. */
    c = ~c;
    c &= -first;
    /* What remains must be either nothing or one run reaching the top:
       i.e. equal to the negation of its own lowest set bit. */
    first = c & -c;
    return c == -first;
}
938
/* AND the immediate VAL into DEST in place, choosing the cheapest
   encoding available on this CPU. */
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    /* Bits of DEST that the operation must actually preserve/clear;
       for 32-bit ops the high half is a don't-care. */
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions. */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }
    if (facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go: a NI[LH][LH]
       works when only one 16-bit chunk has bits cleared (don't-care
       bits are treated as set via "| ~valid"). */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go: NI[LH]F
       when only one 32-bit half has bits cleared. */
    if (facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }
    /* A contiguous (possibly wrapping) run of 1's can be done as a
       single RISBG with zeroing. */
    if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        int msb, lsb;
        if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
            /* Achieve wraparound by swapping msb and lsb. */
            msb = 63 - ctz64(~val);
            lsb = clz64(~val) + 1;
        } else {
            msb = clz64(val);
            lsb = 63 - ctz64(val);
        }
        tcg_out_risbg(s, dest, dest, msb, lsb, 0, 1);
        return;
    }

    /* Fall back to loading the constant. */
    tcg_out_movi(s, type, TCG_TMP0, val);
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}
1007
1008static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1009{
1010 static const S390Opcode oi_insns[4] = {
1011 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1012 };
1013 static const S390Opcode nif_insns[2] = {
1014 RIL_OILF, RIL_OIHF
1015 };
1016
1017 int i;
1018
1019 /* Look for no-op. */
1020 if (val == 0) {
1021 return;
1022 }
1023
1024 if (facilities & FACILITY_EXT_IMM) {
1025 /* Try all 32-bit insns that can perform it in one go. */
1026 for (i = 0; i < 4; i++) {
1027 tcg_target_ulong mask = (0xffffull << i*16);
1028 if ((val & mask) != 0 && (val & ~mask) == 0) {
1029 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1030 return;
1031 }
1032 }
1033
1034 /* Try all 48-bit insns that can perform it in one go. */
1035 for (i = 0; i < 2; i++) {
1036 tcg_target_ulong mask = (0xffffffffull << i*32);
1037 if ((val & mask) != 0 && (val & ~mask) == 0) {
1038 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1039 return;
1040 }
1041 }
1042
1043 /* Perform the OR via sequential modifications to the high and
1044 low parts. Do this via recursion to handle 16-bit vs 32-bit
1045 masks in each half. */
1046 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1047 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1048 } else {
1049 /* With no extended-immediate facility, we don't need to be so
1050 clever. Just iterate over the insns and mask in the constant. */
1051 for (i = 0; i < 4; i++) {
1052 tcg_target_ulong mask = (0xffffull << i*16);
1053 if ((val & mask) != 0) {
1054 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1055 }
1056 }
1057 }
1058}
1059
/* XOR the 64-bit immediate VAL into DEST, one 32-bit half at a time,
   skipping any half that is zero (XOR with 0 is a no-op).  Requires
   the extended-immediate facility (XILF/XIHF); callers gate on
   tcg_match_xori. */
static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    /* Perform the xor by parts. */
    if (val & 0xffffffff) {
        tcg_out_insn(s, RIL, XILF, dest, val);
    }
    if (val > 0xffffffff) {
        /* ">> 31 >> 1" == ">> 32", split to stay defined on narrower types. */
        tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
    }
}
1070
/* Emit a comparison of R1 against C2 (a register, or a constant when
   C2CONST is set) and return the s390 condition-code mask on which the
   caller should branch/select for TCG condition C.  TYPE chooses a
   32-bit or 64-bit comparison.  */
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, int c2const)
{
    bool is_unsigned = is_unsigned_cond(c);
    if (c2const) {
        if (c2 == 0) {
            /* Compare against zero via LOAD AND TEST.  It sets the
               condition code differently from COMPARE, hence the
               dedicated ltr condition table.  */
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LTR, r1, r1);
            } else {
                tcg_out_insn(s, RRE, LTGR, r1, r1);
            }
            return tcg_cond_to_ltr_cond[c];
        } else {
            /* NOTE(review): these RIL compare-immediate insns belong to
               the extended-immediate facility; presumably the operand
               constraints only allow a constant here when that facility
               is present -- confirm against the constraint tables.  */
            if (is_unsigned) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
                }
            } else {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
                }
            }
        }
    } else {
        /* Register-register compare, signed or unsigned as required
           by the condition.  */
        if (is_unsigned) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CLR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CLGR, r1, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CGR, r1, c2);
            }
        }
    }
    return tcg_cond_to_s390_cond[c];
}
1115
/* Emit code for setcond: DEST = (C1 <c> C2) ? 1 : 0.  */
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond c,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc = tgen_cmp(s, type, c, c1, c2, c2const);

    /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over: */
    /* The BRC displacement is in halfwords: (4 + 4) >> 1 skips the
       4-byte BRC itself plus the following move of zero.  NOTE(review):
       this assumes tcg_out_movi emits exactly 4 bytes for the value 0
       -- confirm for both types.  */
    tcg_out_movi(s, type, dest, 1);
    tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
    tcg_out_movi(s, type, dest, 0);
}
1126
96a9f093
RH
/* Emit code for movcond: DEST = (C1 <c> C2) ? R3 : DEST.  */
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
{
    int cc;
    if (facilities & FACILITY_LOAD_ON_COND) {
        /* Branch-free form via LOAD ON CONDITION.  */
        cc = tgen_cmp(s, type, c, c1, c2, c2const);
        tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
    } else {
        /* No load-on-condition: branch around a register move on the
           inverted condition.  */
        c = tcg_invert_cond(c);
        cc = tgen_cmp(s, type, c, c1, c2, c2const);

        /* Emit: if (cc) goto over; dest = r3; over: */
        /* Halfword displacement skips the 4-byte BRC plus the 4-byte
           LGR that follows.  */
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_insn(s, RRE, LGR, dest, r3);
    }
}
1143
d5690ea4
RH
1144bool tcg_target_deposit_valid(int ofs, int len)
1145{
1146 return (facilities & FACILITY_GEN_INST_EXT) != 0;
1147}
1148
1149static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1150 int ofs, int len)
1151{
1152 int lsb = (63 - ofs);
1153 int msb = lsb - (len - 1);
f0bffc27 1154 tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
d5690ea4
RH
1155}
1156
48bb3750
RH
1157static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
1158{
1159 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1160 if (off > -0x8000 && off < 0x7fff) {
1161 tcg_out_insn(s, RI, BRC, cc, off);
1162 } else if (off == (int32_t)off) {
1163 tcg_out_insn(s, RIL, BRCL, cc, off);
1164 } else {
1165 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1166 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1167 }
1168}
1169
/* Emit a branch on condition mask CC to TCG label LABELNO.  If the
   label is not yet bound, emit a relocation and leave the displacement
   field to be patched later.  */
static void tgen_branch(TCGContext *s, int cc, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value);
    } else if (USE_LONG_BRANCHES) {
        /* 48-bit BRCL with a 32-bit halfword displacement, relocated
           via R_390_PC32DBL (addend -2 accounts for the displacement
           field sitting 2 bytes into the insn).  */
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2);
        s->code_ptr += 4;
    } else {
        /* 32-bit BRC with a 16-bit halfword displacement.  */
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2);
        s->code_ptr += 2;
    }
}
1185
/* Emit a fused compare-and-branch (RIE format) of registers R1 and R2,
   branching to LABELNO on condition mask CC.  OPC selects the compare
   flavor (signed/unsigned, 32/64-bit).  */
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    tcg_target_long off;

    if (l->has_value) {
        /* Label already bound: halfword displacement to its address.  */
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = ((int16_t *)s->code_ptr)[1];
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
    }

    /* RIE layout: high opcode byte with R1/R2, then the 16-bit
       displacement, then the condition mask and low opcode byte.  */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}
1204
/* Emit a fused compare-immediate-and-branch (RIE format) of register R1
   against the 8-bit immediate I2, branching to LABELNO on condition
   mask CC.  OPC selects the compare flavor.  */
static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    tcg_target_long off;

    if (l->has_value) {
        /* Label already bound: halfword displacement to its address.  */
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = ((int16_t *)s->code_ptr)[1];
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
    }

    /* RIE layout: unlike the register form, CC sits in the first
       halfword and the immediate in the last.  */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}
1223
/* Emit a conditional branch to LABELNO, comparing R1 against C2 (a
   constant when C2CONST) under TCG condition C.  Prefers the fused
   compare-and-branch insns of the general-instruction-extension
   facility when available and the operands fit; otherwise falls back
   to a separate compare followed by a branch.  */
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, int labelno)
{
    int cc;

    if (facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            /* Register-register fused compare-and-branch.  */
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, labelno);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE.  */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
            return;
        }
    }

    /* Fallback: explicit compare, then a plain conditional branch.  */
    cc = tgen_cmp(s, type, c, r1, c2, c2const);
    tgen_branch(s, cc, labelno);
}
1274
1275static void tgen_calli(TCGContext *s, tcg_target_long dest)
1276{
1277 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1278 if (off == (int32_t)off) {
1279 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1280 } else {
1281 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1282 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1283 }
1284}
1285
/* Emit the actual guest-memory load selected by OPC from BASE+INDEX+DISP
   into DATA.  s390 is big-endian, so when the guest is little-endian the
   byte-reversing load insns (LRV*) are used, followed by the required
   zero/sign extension for sub-doubleword sizes.  */
static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif
    switch (opc) {
    case LD_UINT8:
        /* Single bytes need no swapping.  */
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case LD_INT8:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;
    case LD_UINT16:
        if (bswap) {
            /* swapped unsigned halfword load with upper bits zeroed */
            tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
            tgen_ext16u(s, TCG_TYPE_I64, data, data);
        } else {
            tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        }
        break;
    case LD_INT16:
        if (bswap) {
            /* swapped sign-extended halfword load */
            tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
            tgen_ext16s(s, TCG_TYPE_I64, data, data);
        } else {
            tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        }
        break;
    case LD_UINT32:
        if (bswap) {
            /* swapped unsigned int load with upper bits zeroed */
            tcg_out_insn(s, RXY, LRV, data, base, index, disp);
            tgen_ext32u(s, data, data);
        } else {
            tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        }
        break;
    case LD_INT32:
        if (bswap) {
            /* swapped sign-extended int load */
            tcg_out_insn(s, RXY, LRV, data, base, index, disp);
            tgen_ext32s(s, data, data);
        } else {
            tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        }
        break;
    case LD_UINT64:
        if (bswap) {
            tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, LG, data, base, index, disp);
        }
        break;
    default:
        tcg_abort();
    }
}
1348
/* Emit the actual guest-memory store selected by OPC of DATA to
   BASE+INDEX+DISP, byte-reversing (STRV*) for little-endian guests.
   For non-swapped sub-word stores, prefer the short RX encoding when
   the displacement fits its unsigned 12-bit field.  */
static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif
    switch (opc) {
    case LD_UINT8:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;
    case LD_UINT16:
        if (bswap) {
            tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        } else if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;
    case LD_UINT32:
        if (bswap) {
            tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        } else if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;
    case LD_UINT64:
        if (bswap) {
            tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STG, data, base, index, disp);
        }
        break;
    default:
        tcg_abort();
    }
}
1394
1395#if defined(CONFIG_SOFTMMU)
65a62a75
RH
/* Emit the softmmu TLB lookup and slow-path helper call for a qemu
   load (IS_STORE == 0) or store (IS_STORE != 0) of size/signedness OPC
   at guest address ADDR_REG, for mmu context MEM_INDEX.

   On the fast path, falls through with the translated host address in
   the returned register; the caller emits the actual access there and
   then calls tcg_finish_qemu_ldst() with *LABEL2_PTR_P to patch the
   branch that skips over that access from the slow path.  */
static TCGReg tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
                                    TCGReg addr_reg, int mem_index, int opc,
                                    uint16_t **label2_ptr_p, int is_store)
{
    const TCGReg arg0 = tcg_target_call_iarg_regs[0];
    const TCGReg arg1 = tcg_target_call_iarg_regs[1];
    const TCGReg arg2 = tcg_target_call_iarg_regs[2];
    const TCGReg arg3 = tcg_target_call_iarg_regs[3];
    int s_bits = opc & 3;      /* low two bits of opc encode log2(size) */
    uint16_t *label1_ptr;
    tcg_target_long ofs;

    /* arg1 <- guest address, zero-extended for 32-bit guests.  */
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, arg1, addr_reg);
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
    }

    /* arg2 <- byte offset of the TLB entry for this address.  */
    tcg_out_sh64(s, RSY_SRLG, arg2, addr_reg, TCG_REG_NONE,
                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* Mask arg1 down to the page address, but keep the low s_bits so
       that an unaligned access mismatches the TLB tag and takes the
       slow path.  */
    tgen_andi(s, TCG_TYPE_I64, arg1, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tgen_andi(s, TCG_TYPE_I64, arg2, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    if (is_store) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    }
    /* The RXY displacement field is a signed 20-bit value.  */
    assert(ofs < 0x80000);

    /* Compare the masked address against the cached TLB tag.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, arg1, arg2, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, arg1, arg2, TCG_AREG0, ofs);
    }

    /* Reload the unmasked address into arg1 for both paths.  */
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, arg1, addr_reg);
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
    }

    label1_ptr = (uint16_t*)s->code_ptr;

    /* je label1 (offset will be patched in later) */
    tcg_out_insn(s, RI, BRC, S390_CC_EQ, 0);

    /* call load/store helper */
    if (is_store) {
        /* Make sure to zero-extend the value to the full register
           for the calling convention. */
        switch (opc) {
        case LD_UINT8:
            tgen_ext8u(s, TCG_TYPE_I64, arg2, data_reg);
            break;
        case LD_UINT16:
            tgen_ext16u(s, TCG_TYPE_I64, arg2, data_reg);
            break;
        case LD_UINT32:
            tgen_ext32u(s, arg2, data_reg);
            break;
        case LD_UINT64:
            tcg_out_mov(s, TCG_TYPE_I64, arg2, data_reg);
            break;
        default:
            tcg_abort();
        }
        tcg_out_movi(s, TCG_TYPE_I32, arg3, mem_index);
        tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
        tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
        tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
        tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);

        /* sign extension */
        /* The helper returns its result in R2; extend or copy it into
           data_reg as the opcode demands.  */
        switch (opc) {
        case LD_INT8:
            tgen_ext8s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
            break;
        case LD_INT16:
            tgen_ext16s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
            break;
        case LD_INT32:
            tgen_ext32s(s, data_reg, TCG_REG_R2);
            break;
        default:
            /* unsigned -> just copy */
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
            break;
        }
    }

    /* jump to label2 (end) */
    *label2_ptr_p = (uint16_t*)s->code_ptr;

    tcg_out_insn(s, RI, BRC, S390_CC_ALWAYS, 0);

    /* this is label1, patch branch */
    /* The displacement halfword of the BRC at label1 is its second
       16-bit unit; offsets are counted in halfwords.  */
    *(label1_ptr + 1) = ((unsigned long)s->code_ptr -
                         (unsigned long)label1_ptr) >> 1;

    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    assert(ofs < 0x80000);

    /* Fast path: add the TLB addend to turn the guest address in arg1
       into a host address.  */
    tcg_out_mem(s, 0, RXY_AG, arg1, arg2, TCG_AREG0, ofs);

    return arg1;
}
1506
1507static void tcg_finish_qemu_ldst(TCGContext* s, uint16_t *label2_ptr)
1508{
1509 /* patch branch */
1510 *(label2_ptr + 1) = ((unsigned long)s->code_ptr -
1511 (unsigned long)label2_ptr) >> 1;
1512}
1513#else
/* User-mode (no softmmu) address setup for a qemu load/store: adjust
   *ADDR_REG, *INDEX_REG and *DISP in place so that the subsequent
   direct access computes ADDR + GUEST_BASE.  */
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    /* 32-bit guest addresses must be zero-extended; do it into the
       scratch register so the caller's register is untouched.  */
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    /* A small guest base fits the signed 20-bit RXY displacement;
       otherwise it lives in the reserved guest-base register.  */
    if (GUEST_BASE < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = GUEST_BASE;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
1529#endif /* CONFIG_SOFTMMU */
1530
/* Load guest data with address translation (if applicable) and
   endianness conversion.  ARGS is { data_reg, addr_reg, mem_index };
   OPC encodes size and signedness.  */
static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
{
    TCGReg addr_reg, data_reg;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    uint16_t *label2_ptr;
#else
    TCGReg index_reg;
    tcg_target_long disp;
#endif

    data_reg = *args++;
    addr_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    mem_index = *args;

    /* Emit the TLB lookup + slow path; addr_reg comes back holding the
       translated host address for the fast path.  */
    addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
                                     opc, &label2_ptr, 0);

    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);

    tcg_finish_qemu_ldst(s, label2_ptr);
#else
    /* User mode: just offset by the guest base and access directly.  */
    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
1561
/* Store guest data with address translation (if applicable) and
   endianness conversion; mirror image of tcg_out_qemu_ld.  */
static void tcg_out_qemu_st(TCGContext* s, const TCGArg* args, int opc)
{
    TCGReg addr_reg, data_reg;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    uint16_t *label2_ptr;
#else
    TCGReg index_reg;
    tcg_target_long disp;
#endif

    data_reg = *args++;
    addr_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    mem_index = *args;

    /* Emit the TLB lookup + slow path; addr_reg comes back holding the
       translated host address for the fast path.  */
    addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
                                     opc, &label2_ptr, 1);

    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);

    tcg_finish_qemu_ldst(s, label2_ptr);
#else
    /* User mode: just offset by the guest base and access directly.  */
    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
1590
48bb3750
RH
/* Expand to the pair of case labels for opcodes whose 32-bit and
   64-bit variants share one implementation.  */
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
48bb3750 1594
a9751609 1595static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2827822e
AG
1596 const TCGArg *args, const int *const_args)
1597{
48bb3750 1598 S390Opcode op;
0db921e6 1599 TCGArg a0, a1, a2;
48bb3750
RH
1600
1601 switch (opc) {
1602 case INDEX_op_exit_tb:
1603 /* return value */
1604 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
1605 tgen_gotoi(s, S390_CC_ALWAYS, (unsigned long)tb_ret_addr);
1606 break;
1607
1608 case INDEX_op_goto_tb:
1609 if (s->tb_jmp_offset) {
1610 tcg_abort();
1611 } else {
1612 /* load address stored at s->tb_next + args[0] */
1613 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
1614 /* and go there */
1615 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1616 }
1617 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1618 break;
1619
1620 case INDEX_op_call:
1621 if (const_args[0]) {
1622 tgen_calli(s, args[0]);
1623 } else {
1624 tcg_out_insn(s, RR, BASR, TCG_REG_R14, args[0]);
1625 }
1626 break;
1627
1628 case INDEX_op_mov_i32:
1629 tcg_out_mov(s, TCG_TYPE_I32, args[0], args[1]);
1630 break;
1631 case INDEX_op_movi_i32:
1632 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1633 break;
1634
1635 OP_32_64(ld8u):
1636 /* ??? LLC (RXY format) is only present with the extended-immediate
1637 facility, whereas LLGC is always present. */
1638 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1639 break;
1640
1641 OP_32_64(ld8s):
1642 /* ??? LB is no smaller than LGB, so no point to using it. */
1643 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1644 break;
1645
1646 OP_32_64(ld16u):
1647 /* ??? LLH (RXY format) is only present with the extended-immediate
1648 facility, whereas LLGH is always present. */
1649 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1650 break;
1651
1652 case INDEX_op_ld16s_i32:
1653 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1654 break;
1655
1656 case INDEX_op_ld_i32:
1657 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1658 break;
1659
1660 OP_32_64(st8):
1661 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1662 TCG_REG_NONE, args[2]);
1663 break;
1664
1665 OP_32_64(st16):
1666 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1667 TCG_REG_NONE, args[2]);
1668 break;
1669
1670 case INDEX_op_st_i32:
1671 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1672 break;
1673
1674 case INDEX_op_add_i32:
0db921e6 1675 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1676 if (const_args[2]) {
0db921e6
RH
1677 do_addi_32:
1678 if (a0 == a1) {
1679 if (a2 == (int16_t)a2) {
1680 tcg_out_insn(s, RI, AHI, a0, a2);
1681 break;
1682 }
1683 if (facilities & FACILITY_EXT_IMM) {
1684 tcg_out_insn(s, RIL, AFI, a0, a2);
1685 break;
1686 }
1687 }
1688 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1689 } else if (a0 == a1) {
1690 tcg_out_insn(s, RR, AR, a0, a2);
48bb3750 1691 } else {
0db921e6 1692 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
1693 }
1694 break;
1695 case INDEX_op_sub_i32:
0db921e6 1696 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1697 if (const_args[2]) {
0db921e6
RH
1698 a2 = -a2;
1699 goto do_addi_32;
48bb3750 1700 }
0db921e6 1701 tcg_out_insn(s, RR, SR, args[0], args[2]);
48bb3750
RH
1702 break;
1703
1704 case INDEX_op_and_i32:
1705 if (const_args[2]) {
07ff7983 1706 tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
48bb3750
RH
1707 } else {
1708 tcg_out_insn(s, RR, NR, args[0], args[2]);
1709 }
1710 break;
1711 case INDEX_op_or_i32:
1712 if (const_args[2]) {
1713 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1714 } else {
1715 tcg_out_insn(s, RR, OR, args[0], args[2]);
1716 }
1717 break;
1718 case INDEX_op_xor_i32:
1719 if (const_args[2]) {
1720 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1721 } else {
1722 tcg_out_insn(s, RR, XR, args[0], args[2]);
1723 }
1724 break;
1725
1726 case INDEX_op_neg_i32:
1727 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1728 break;
1729
1730 case INDEX_op_mul_i32:
1731 if (const_args[2]) {
1732 if ((int32_t)args[2] == (int16_t)args[2]) {
1733 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1734 } else {
1735 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1736 }
1737 } else {
1738 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1739 }
1740 break;
1741
1742 case INDEX_op_div2_i32:
1743 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1744 break;
1745 case INDEX_op_divu2_i32:
1746 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1747 break;
1748
1749 case INDEX_op_shl_i32:
1750 op = RS_SLL;
1751 do_shift32:
1752 if (const_args[2]) {
1753 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1754 } else {
1755 tcg_out_sh32(s, op, args[0], args[2], 0);
1756 }
1757 break;
1758 case INDEX_op_shr_i32:
1759 op = RS_SRL;
1760 goto do_shift32;
1761 case INDEX_op_sar_i32:
1762 op = RS_SRA;
1763 goto do_shift32;
1764
1765 case INDEX_op_rotl_i32:
1766 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1767 if (const_args[2]) {
1768 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1769 } else {
1770 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1771 }
1772 break;
1773 case INDEX_op_rotr_i32:
1774 if (const_args[2]) {
1775 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1776 TCG_REG_NONE, (32 - args[2]) & 31);
1777 } else {
1778 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1779 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1780 }
1781 break;
1782
1783 case INDEX_op_ext8s_i32:
1784 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1785 break;
1786 case INDEX_op_ext16s_i32:
1787 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1788 break;
1789 case INDEX_op_ext8u_i32:
1790 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1791 break;
1792 case INDEX_op_ext16u_i32:
1793 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1794 break;
1795
1796 OP_32_64(bswap16):
1797 /* The TCG bswap definition requires bits 0-47 already be zero.
1798 Thus we don't need the G-type insns to implement bswap16_i64. */
1799 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1800 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1801 break;
1802 OP_32_64(bswap32):
1803 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1804 break;
1805
3790b918
RH
1806 case INDEX_op_add2_i32:
1807 /* ??? Make use of ALFI. */
1808 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1809 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1810 break;
1811 case INDEX_op_sub2_i32:
1812 /* ??? Make use of SLFI. */
1813 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1814 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1815 break;
1816
48bb3750
RH
1817 case INDEX_op_br:
1818 tgen_branch(s, S390_CC_ALWAYS, args[0]);
1819 break;
1820
1821 case INDEX_op_brcond_i32:
1822 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
1823 args[1], const_args[1], args[3]);
1824 break;
1825 case INDEX_op_setcond_i32:
1826 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1827 args[2], const_args[2]);
1828 break;
96a9f093
RH
1829 case INDEX_op_movcond_i32:
1830 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
1831 args[2], const_args[2], args[3]);
1832 break;
48bb3750
RH
1833
1834 case INDEX_op_qemu_ld8u:
1835 tcg_out_qemu_ld(s, args, LD_UINT8);
1836 break;
1837 case INDEX_op_qemu_ld8s:
1838 tcg_out_qemu_ld(s, args, LD_INT8);
1839 break;
1840 case INDEX_op_qemu_ld16u:
1841 tcg_out_qemu_ld(s, args, LD_UINT16);
1842 break;
1843 case INDEX_op_qemu_ld16s:
1844 tcg_out_qemu_ld(s, args, LD_INT16);
1845 break;
1846 case INDEX_op_qemu_ld32:
1847 /* ??? Technically we can use a non-extending instruction. */
1848 tcg_out_qemu_ld(s, args, LD_UINT32);
1849 break;
1850 case INDEX_op_qemu_ld64:
1851 tcg_out_qemu_ld(s, args, LD_UINT64);
1852 break;
1853
1854 case INDEX_op_qemu_st8:
1855 tcg_out_qemu_st(s, args, LD_UINT8);
1856 break;
1857 case INDEX_op_qemu_st16:
1858 tcg_out_qemu_st(s, args, LD_UINT16);
1859 break;
1860 case INDEX_op_qemu_st32:
1861 tcg_out_qemu_st(s, args, LD_UINT32);
1862 break;
1863 case INDEX_op_qemu_st64:
1864 tcg_out_qemu_st(s, args, LD_UINT64);
1865 break;
1866
48bb3750
RH
1867 case INDEX_op_mov_i64:
1868 tcg_out_mov(s, TCG_TYPE_I64, args[0], args[1]);
1869 break;
1870 case INDEX_op_movi_i64:
1871 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1872 break;
1873
1874 case INDEX_op_ld16s_i64:
1875 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1876 break;
1877 case INDEX_op_ld32u_i64:
1878 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1879 break;
1880 case INDEX_op_ld32s_i64:
1881 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1882 break;
1883 case INDEX_op_ld_i64:
1884 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1885 break;
1886
1887 case INDEX_op_st32_i64:
1888 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1889 break;
1890 case INDEX_op_st_i64:
1891 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1892 break;
1893
1894 case INDEX_op_add_i64:
0db921e6 1895 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 1896 if (const_args[2]) {
0db921e6
RH
1897 do_addi_64:
1898 if (a0 == a1) {
1899 if (a2 == (int16_t)a2) {
1900 tcg_out_insn(s, RI, AGHI, a0, a2);
1901 break;
1902 }
1903 if (facilities & FACILITY_EXT_IMM) {
1904 if (a2 == (int32_t)a2) {
1905 tcg_out_insn(s, RIL, AGFI, a0, a2);
1906 break;
1907 } else if (a2 == (uint32_t)a2) {
1908 tcg_out_insn(s, RIL, ALGFI, a0, a2);
1909 break;
1910 } else if (-a2 == (uint32_t)-a2) {
1911 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
1912 break;
1913 }
1914 }
1915 }
1916 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1917 } else if (a0 == a1) {
1918 tcg_out_insn(s, RRE, AGR, a0, a2);
48bb3750 1919 } else {
0db921e6 1920 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
1921 }
1922 break;
1923 case INDEX_op_sub_i64:
0db921e6 1924 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 1925 if (const_args[2]) {
0db921e6
RH
1926 a2 = -a2;
1927 goto do_addi_64;
48bb3750
RH
1928 } else {
1929 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
1930 }
1931 break;
1932
1933 case INDEX_op_and_i64:
1934 if (const_args[2]) {
07ff7983 1935 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
48bb3750
RH
1936 } else {
1937 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
1938 }
1939 break;
1940 case INDEX_op_or_i64:
1941 if (const_args[2]) {
1942 tgen64_ori(s, args[0], args[2]);
1943 } else {
1944 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
1945 }
1946 break;
1947 case INDEX_op_xor_i64:
1948 if (const_args[2]) {
1949 tgen64_xori(s, args[0], args[2]);
1950 } else {
1951 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
1952 }
1953 break;
1954
1955 case INDEX_op_neg_i64:
1956 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
1957 break;
1958 case INDEX_op_bswap64_i64:
1959 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
1960 break;
1961
1962 case INDEX_op_mul_i64:
1963 if (const_args[2]) {
1964 if (args[2] == (int16_t)args[2]) {
1965 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
1966 } else {
1967 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
1968 }
1969 } else {
1970 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
1971 }
1972 break;
1973
1974 case INDEX_op_div2_i64:
1975 /* ??? We get an unnecessary sign-extension of the dividend
1976 into R3 with this definition, but as we do in fact always
1977 produce both quotient and remainder using INDEX_op_div_i64
1978 instead requires jumping through even more hoops. */
1979 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
1980 break;
1981 case INDEX_op_divu2_i64:
1982 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
1983 break;
36017dc6
RH
1984 case INDEX_op_mulu2_i64:
1985 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
1986 break;
48bb3750
RH
1987
1988 case INDEX_op_shl_i64:
1989 op = RSY_SLLG;
1990 do_shift64:
1991 if (const_args[2]) {
1992 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
1993 } else {
1994 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
1995 }
1996 break;
1997 case INDEX_op_shr_i64:
1998 op = RSY_SRLG;
1999 goto do_shift64;
2000 case INDEX_op_sar_i64:
2001 op = RSY_SRAG;
2002 goto do_shift64;
2003
2004 case INDEX_op_rotl_i64:
2005 if (const_args[2]) {
2006 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2007 TCG_REG_NONE, args[2]);
2008 } else {
2009 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2010 }
2011 break;
2012 case INDEX_op_rotr_i64:
2013 if (const_args[2]) {
2014 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2015 TCG_REG_NONE, (64 - args[2]) & 63);
2016 } else {
2017 /* We can use the smaller 32-bit negate because only the
2018 low 6 bits are examined for the rotate. */
2019 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2020 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2021 }
2022 break;
2023
2024 case INDEX_op_ext8s_i64:
2025 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2026 break;
2027 case INDEX_op_ext16s_i64:
2028 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2029 break;
2030 case INDEX_op_ext32s_i64:
2031 tgen_ext32s(s, args[0], args[1]);
2032 break;
2033 case INDEX_op_ext8u_i64:
2034 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2035 break;
2036 case INDEX_op_ext16u_i64:
2037 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2038 break;
2039 case INDEX_op_ext32u_i64:
2040 tgen_ext32u(s, args[0], args[1]);
2041 break;
2042
3790b918
RH
2043 case INDEX_op_add2_i64:
2044 /* ??? Make use of ALGFI and SLGFI. */
2045 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2046 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2047 break;
2048 case INDEX_op_sub2_i64:
2049 /* ??? Make use of ALGFI and SLGFI. */
2050 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2051 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2052 break;
2053
48bb3750
RH
2054 case INDEX_op_brcond_i64:
2055 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2056 args[1], const_args[1], args[3]);
2057 break;
2058 case INDEX_op_setcond_i64:
2059 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2060 args[2], const_args[2]);
2061 break;
96a9f093
RH
2062 case INDEX_op_movcond_i64:
2063 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2064 args[2], const_args[2], args[3]);
2065 break;
48bb3750
RH
2066
2067 case INDEX_op_qemu_ld32u:
2068 tcg_out_qemu_ld(s, args, LD_UINT32);
2069 break;
2070 case INDEX_op_qemu_ld32s:
2071 tcg_out_qemu_ld(s, args, LD_INT32);
2072 break;
48bb3750 2073
d5690ea4
RH
2074 OP_32_64(deposit):
2075 tgen_deposit(s, args[0], args[2], args[3], args[4]);
2076 break;
2077
48bb3750
RH
2078 default:
2079 fprintf(stderr,"unimplemented opc 0x%x\n",opc);
2080 tcg_abort();
2081 }
2827822e
AG
2082}
2083
48bb3750
RH
2084static const TCGTargetOpDef s390_op_defs[] = {
2085 { INDEX_op_exit_tb, { } },
2086 { INDEX_op_goto_tb, { } },
2087 { INDEX_op_call, { "ri" } },
48bb3750
RH
2088 { INDEX_op_br, { } },
2089
2090 { INDEX_op_mov_i32, { "r", "r" } },
2091 { INDEX_op_movi_i32, { "r" } },
2092
2093 { INDEX_op_ld8u_i32, { "r", "r" } },
2094 { INDEX_op_ld8s_i32, { "r", "r" } },
2095 { INDEX_op_ld16u_i32, { "r", "r" } },
2096 { INDEX_op_ld16s_i32, { "r", "r" } },
2097 { INDEX_op_ld_i32, { "r", "r" } },
2098 { INDEX_op_st8_i32, { "r", "r" } },
2099 { INDEX_op_st16_i32, { "r", "r" } },
2100 { INDEX_op_st_i32, { "r", "r" } },
2101
0db921e6
RH
2102 { INDEX_op_add_i32, { "r", "r", "ri" } },
2103 { INDEX_op_sub_i32, { "r", "0", "ri" } },
48bb3750
RH
2104 { INDEX_op_mul_i32, { "r", "0", "rK" } },
2105
2106 { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
2107 { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
2108
07ff7983 2109 { INDEX_op_and_i32, { "r", "0", "ri" } },
671c835b
RH
2110 { INDEX_op_or_i32, { "r", "0", "rO" } },
2111 { INDEX_op_xor_i32, { "r", "0", "rX" } },
48bb3750
RH
2112
2113 { INDEX_op_neg_i32, { "r", "r" } },
2114
2115 { INDEX_op_shl_i32, { "r", "0", "Ri" } },
2116 { INDEX_op_shr_i32, { "r", "0", "Ri" } },
2117 { INDEX_op_sar_i32, { "r", "0", "Ri" } },
2118
2119 { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
2120 { INDEX_op_rotr_i32, { "r", "r", "Ri" } },
2121
2122 { INDEX_op_ext8s_i32, { "r", "r" } },
2123 { INDEX_op_ext8u_i32, { "r", "r" } },
2124 { INDEX_op_ext16s_i32, { "r", "r" } },
2125 { INDEX_op_ext16u_i32, { "r", "r" } },
2126
2127 { INDEX_op_bswap16_i32, { "r", "r" } },
2128 { INDEX_op_bswap32_i32, { "r", "r" } },
2129
3790b918
RH
2130 { INDEX_op_add2_i32, { "r", "r", "0", "1", "r", "r" } },
2131 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "r", "r" } },
2132
671c835b
RH
2133 { INDEX_op_brcond_i32, { "r", "rC" } },
2134 { INDEX_op_setcond_i32, { "r", "r", "rC" } },
2135 { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
d5690ea4 2136 { INDEX_op_deposit_i32, { "r", "0", "r" } },
48bb3750
RH
2137
2138 { INDEX_op_qemu_ld8u, { "r", "L" } },
2139 { INDEX_op_qemu_ld8s, { "r", "L" } },
2140 { INDEX_op_qemu_ld16u, { "r", "L" } },
2141 { INDEX_op_qemu_ld16s, { "r", "L" } },
2142 { INDEX_op_qemu_ld32, { "r", "L" } },
2143 { INDEX_op_qemu_ld64, { "r", "L" } },
2144
2145 { INDEX_op_qemu_st8, { "L", "L" } },
2146 { INDEX_op_qemu_st16, { "L", "L" } },
2147 { INDEX_op_qemu_st32, { "L", "L" } },
2148 { INDEX_op_qemu_st64, { "L", "L" } },
2149
48bb3750
RH
2150 { INDEX_op_mov_i64, { "r", "r" } },
2151 { INDEX_op_movi_i64, { "r" } },
2152
2153 { INDEX_op_ld8u_i64, { "r", "r" } },
2154 { INDEX_op_ld8s_i64, { "r", "r" } },
2155 { INDEX_op_ld16u_i64, { "r", "r" } },
2156 { INDEX_op_ld16s_i64, { "r", "r" } },
2157 { INDEX_op_ld32u_i64, { "r", "r" } },
2158 { INDEX_op_ld32s_i64, { "r", "r" } },
2159 { INDEX_op_ld_i64, { "r", "r" } },
2160
2161 { INDEX_op_st8_i64, { "r", "r" } },
2162 { INDEX_op_st16_i64, { "r", "r" } },
2163 { INDEX_op_st32_i64, { "r", "r" } },
2164 { INDEX_op_st_i64, { "r", "r" } },
2165
0db921e6
RH
2166 { INDEX_op_add_i64, { "r", "r", "ri" } },
2167 { INDEX_op_sub_i64, { "r", "0", "ri" } },
48bb3750
RH
2168 { INDEX_op_mul_i64, { "r", "0", "rK" } },
2169
2170 { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
2171 { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
36017dc6 2172 { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },
48bb3750 2173
07ff7983 2174 { INDEX_op_and_i64, { "r", "0", "ri" } },
48bb3750
RH
2175 { INDEX_op_or_i64, { "r", "0", "rO" } },
2176 { INDEX_op_xor_i64, { "r", "0", "rX" } },
2177
2178 { INDEX_op_neg_i64, { "r", "r" } },
2179
2180 { INDEX_op_shl_i64, { "r", "r", "Ri" } },
2181 { INDEX_op_shr_i64, { "r", "r", "Ri" } },
2182 { INDEX_op_sar_i64, { "r", "r", "Ri" } },
2183
2184 { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
2185 { INDEX_op_rotr_i64, { "r", "r", "Ri" } },
2186
2187 { INDEX_op_ext8s_i64, { "r", "r" } },
2188 { INDEX_op_ext8u_i64, { "r", "r" } },
2189 { INDEX_op_ext16s_i64, { "r", "r" } },
2190 { INDEX_op_ext16u_i64, { "r", "r" } },
2191 { INDEX_op_ext32s_i64, { "r", "r" } },
2192 { INDEX_op_ext32u_i64, { "r", "r" } },
2193
2194 { INDEX_op_bswap16_i64, { "r", "r" } },
2195 { INDEX_op_bswap32_i64, { "r", "r" } },
2196 { INDEX_op_bswap64_i64, { "r", "r" } },
2197
3790b918
RH
2198 { INDEX_op_add2_i64, { "r", "r", "0", "1", "r", "r" } },
2199 { INDEX_op_sub2_i64, { "r", "r", "0", "1", "r", "r" } },
2200
48bb3750
RH
2201 { INDEX_op_brcond_i64, { "r", "rC" } },
2202 { INDEX_op_setcond_i64, { "r", "r", "rC" } },
96a9f093 2203 { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
d5690ea4 2204 { INDEX_op_deposit_i64, { "r", "0", "r" } },
48bb3750
RH
2205
2206 { INDEX_op_qemu_ld32u, { "r", "L" } },
2207 { INDEX_op_qemu_ld32s, { "r", "L" } },
48bb3750
RH
2208
2209 { -1 },
2210};
2211
48bb3750
RH
/* Probe the CPU's facility bits into the file-scope 'facilities' word
   (declared elsewhere in this file) so later code generation can test
   for optional instruction-set extensions.  */
static void query_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway.  */
    if (hwcap & HWCAP_S390_STFLE) {
        /* STFLE takes its operand count in %r0 and its buffer address as
           the base of its storage operand; pin the variables to the exact
           hardware registers the instruction reads.  */
        register int r0 __asm__("0");
        register void *r1 __asm__("1");

        /* stfle 0(%r1) -- emitted as a raw .word pair, presumably because
           older assemblers do not know the mnemonic; TODO confirm.
           The "0"(0) input sets %r0 = 0, which per the STFLE definition
           requests a single doubleword of facility bits -- verify against
           the Principles of Operation.  */
        r1 = &facilities;
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
    }
}
2228
2229static void tcg_target_init(TCGContext *s)
2827822e 2230{
48bb3750
RH
2231 query_facilities();
2232
2233 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2234 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2235
2236 tcg_regset_clear(tcg_target_call_clobber_regs);
2237 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2238 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2239 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2240 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2241 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2242 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
2243 /* The return register can be considered call-clobbered. */
2244 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2245
2246 tcg_regset_clear(s->reserved_regs);
2247 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2248 /* XXX many insns can't be used with R0, so we better avoid it for now */
2249 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2250 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2251
2252 tcg_add_target_add_op_defs(s390_op_defs);
2827822e
AG
2253}
2254
/* Emit the host-code prologue/epilogue that enters and leaves generated
   translation blocks: save callee-saved registers, allocate the TCG
   stack frame, jump to the TB, and (at tb_ret_addr) unwind and return.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    tcg_target_long frame_size;

    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    /* The frame holds the ABI-mandated register save/backchain area,
       outgoing call arguments, and the TCG temporary buffer.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE;
    frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -frame_size);

    /* Tell TCG where its temporary spill buffer lives in the new frame.  */
    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* Large guest-base offsets get a dedicated reserved register;
       0x80000 is presumably the limit of what the addressing modes can
       fold directly -- confirm against the load/store emitters.  */
    if (GUEST_BASE >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    /* First C argument is the CPU env pointer; second is the TB entry.  */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    /* TBs branch back here when they finish.  */
    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 frame_size + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}