1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
27#include "tcg-be-null.h"
28
29/* We only support generating code for 64-bit mode. */
30#if TCG_TARGET_REG_BITS != 64
31#error "unsupported code generation mode"
32#endif
33
34#include "elf.h"
35
36/* ??? The translation blocks produced by TCG are generally small enough to
37 be entirely reachable with a 16-bit displacement. Leaving the option for
38 a 32-bit displacement here Just In Case. */
39#define USE_LONG_BRANCHES 0
40
41#define TCG_CT_CONST_MULI 0x100
42#define TCG_CT_CONST_ORI 0x200
43#define TCG_CT_CONST_XORI 0x400
44#define TCG_CT_CONST_CMPI 0x800
45
46/* In several places within the instruction set, 0 means "no register"
47   rather than TCG_REG_R0. */
48#define TCG_REG_NONE 0
49
50/* A scratch register that may be used throughout the backend. */
51#define TCG_TMP0 TCG_REG_R14
52
53#ifdef CONFIG_USE_GUEST_BASE
54#define TCG_GUEST_BASE_REG TCG_REG_R13
55#else
56#define TCG_GUEST_BASE_REG TCG_REG_R0
57#endif
58
59#ifndef GUEST_BASE
60#define GUEST_BASE 0
61#endif
62
63
64/* All of the following instructions are prefixed with their instruction
65 format, and are defined as 8- or 16-bit quantities, even when the two
66 halves of the 16-bit quantity may appear 32 bits apart in the insn.
67 This makes it easy to copy the values from the tables in Appendix B. */
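/* For example, RXY_AG = 0xe308 is emitted with 0xe3 as the first byte of the
   insn and 0x08 as its last byte, with the registers and the 20-bit
   displacement packed in between (see tcg_out_insn_RSY/RXY below). */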
68typedef enum S390Opcode {
69 RIL_AFI = 0xc209,
70 RIL_AGFI = 0xc208,
71    RIL_ALFI    = 0xc20b,
72 RIL_ALGFI = 0xc20a,
73 RIL_BRASL = 0xc005,
74 RIL_BRCL = 0xc004,
75 RIL_CFI = 0xc20d,
76 RIL_CGFI = 0xc20c,
77 RIL_CLFI = 0xc20f,
78 RIL_CLGFI = 0xc20e,
79 RIL_IIHF = 0xc008,
80 RIL_IILF = 0xc009,
81 RIL_LARL = 0xc000,
82 RIL_LGFI = 0xc001,
83 RIL_LGRL = 0xc408,
84 RIL_LLIHF = 0xc00e,
85 RIL_LLILF = 0xc00f,
86 RIL_LRL = 0xc40d,
87 RIL_MSFI = 0xc201,
88 RIL_MSGFI = 0xc200,
89 RIL_NIHF = 0xc00a,
90 RIL_NILF = 0xc00b,
91 RIL_OIHF = 0xc00c,
92 RIL_OILF = 0xc00d,
93    RIL_SLFI    = 0xc205,
94    RIL_SLGFI   = 0xc204,
95 RIL_XIHF = 0xc006,
96 RIL_XILF = 0xc007,
97
98 RI_AGHI = 0xa70b,
99 RI_AHI = 0xa70a,
100 RI_BRC = 0xa704,
101 RI_IIHH = 0xa500,
102 RI_IIHL = 0xa501,
103 RI_IILH = 0xa502,
104 RI_IILL = 0xa503,
105 RI_LGHI = 0xa709,
106 RI_LLIHH = 0xa50c,
107 RI_LLIHL = 0xa50d,
108 RI_LLILH = 0xa50e,
109 RI_LLILL = 0xa50f,
110 RI_MGHI = 0xa70d,
111 RI_MHI = 0xa70c,
112 RI_NIHH = 0xa504,
113 RI_NIHL = 0xa505,
114 RI_NILH = 0xa506,
115 RI_NILL = 0xa507,
116 RI_OIHH = 0xa508,
117 RI_OIHL = 0xa509,
118 RI_OILH = 0xa50a,
119 RI_OILL = 0xa50b,
120
121 RIE_CGIJ = 0xec7c,
122 RIE_CGRJ = 0xec64,
123 RIE_CIJ = 0xec7e,
124 RIE_CLGRJ = 0xec65,
125 RIE_CLIJ = 0xec7f,
126 RIE_CLGIJ = 0xec7d,
127 RIE_CLRJ = 0xec77,
128 RIE_CRJ = 0xec76,
129    RIE_RISBG   = 0xec55,
130
131 RRE_AGR = 0xb908,
132 RRE_ALGR = 0xb90a,
133 RRE_ALCR = 0xb998,
134 RRE_ALCGR = 0xb988,
135 RRE_CGR = 0xb920,
136 RRE_CLGR = 0xb921,
137 RRE_DLGR = 0xb987,
138 RRE_DLR = 0xb997,
139 RRE_DSGFR = 0xb91d,
140 RRE_DSGR = 0xb90d,
141 RRE_LGBR = 0xb906,
142 RRE_LCGR = 0xb903,
143 RRE_LGFR = 0xb914,
144 RRE_LGHR = 0xb907,
145 RRE_LGR = 0xb904,
146 RRE_LLGCR = 0xb984,
147 RRE_LLGFR = 0xb916,
148 RRE_LLGHR = 0xb985,
149 RRE_LRVR = 0xb91f,
150 RRE_LRVGR = 0xb90f,
151 RRE_LTGR = 0xb902,
152    RRE_MLGR    = 0xb986,
153 RRE_MSGR = 0xb90c,
154 RRE_MSR = 0xb252,
155 RRE_NGR = 0xb980,
156 RRE_OGR = 0xb981,
157 RRE_SGR = 0xb909,
158 RRE_SLGR = 0xb90b,
159 RRE_SLBR = 0xb999,
160 RRE_SLBGR = 0xb989,
161 RRE_XGR = 0xb982,
162
163 RRF_LOCR = 0xb9f2,
164 RRF_LOCGR = 0xb9e2,
165
166    RR_AR       = 0x1a,
167    RR_ALR      = 0x1e,
168 RR_BASR = 0x0d,
169 RR_BCR = 0x07,
170 RR_CLR = 0x15,
171 RR_CR = 0x19,
172 RR_DR = 0x1d,
173 RR_LCR = 0x13,
174 RR_LR = 0x18,
175 RR_LTR = 0x12,
176 RR_NR = 0x14,
177 RR_OR = 0x16,
178 RR_SR = 0x1b,
179    RR_SLR      = 0x1f,
180 RR_XR = 0x17,
181
182 RSY_RLL = 0xeb1d,
183 RSY_RLLG = 0xeb1c,
184 RSY_SLLG = 0xeb0d,
185 RSY_SRAG = 0xeb0a,
186 RSY_SRLG = 0xeb0c,
187
188 RS_SLL = 0x89,
189 RS_SRA = 0x8a,
190 RS_SRL = 0x88,
191
192 RXY_AG = 0xe308,
193 RXY_AY = 0xe35a,
194 RXY_CG = 0xe320,
195 RXY_CY = 0xe359,
196    RXY_LAY     = 0xe371,
197 RXY_LB = 0xe376,
198 RXY_LG = 0xe304,
199 RXY_LGB = 0xe377,
200 RXY_LGF = 0xe314,
201 RXY_LGH = 0xe315,
202 RXY_LHY = 0xe378,
203 RXY_LLGC = 0xe390,
204 RXY_LLGF = 0xe316,
205 RXY_LLGH = 0xe391,
206 RXY_LMG = 0xeb04,
207 RXY_LRV = 0xe31e,
208 RXY_LRVG = 0xe30f,
209 RXY_LRVH = 0xe31f,
210 RXY_LY = 0xe358,
211 RXY_STCY = 0xe372,
212 RXY_STG = 0xe324,
213 RXY_STHY = 0xe370,
214 RXY_STMG = 0xeb24,
215 RXY_STRV = 0xe33e,
216 RXY_STRVG = 0xe32f,
217 RXY_STRVH = 0xe33f,
218 RXY_STY = 0xe350,
219
220 RX_A = 0x5a,
221 RX_C = 0x59,
222 RX_L = 0x58,
223    RX_LA       = 0x41,
224 RX_LH = 0x48,
225 RX_ST = 0x50,
226 RX_STC = 0x42,
227 RX_STH = 0x40,
228} S390Opcode;
229
230#define LD_SIGNED 0x04
231#define LD_UINT8 0x00
232#define LD_INT8 (LD_UINT8 | LD_SIGNED)
233#define LD_UINT16 0x01
234#define LD_INT16 (LD_UINT16 | LD_SIGNED)
235#define LD_UINT32 0x02
236#define LD_INT32 (LD_UINT32 | LD_SIGNED)
237#define LD_UINT64 0x03
238#define LD_INT64 (LD_UINT64 | LD_SIGNED)
239
240#ifndef NDEBUG
241static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
242 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
243    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
244};
245#endif
246
247/* Since R6 is a potential argument register, choose it last of the
248 call-saved registers. Likewise prefer the call-clobbered registers
249 in reverse order to maximize the chance of avoiding the arguments. */
250static const int tcg_target_reg_alloc_order[] = {
251 TCG_REG_R13,
252 TCG_REG_R12,
253 TCG_REG_R11,
254 TCG_REG_R10,
255 TCG_REG_R9,
256 TCG_REG_R8,
257 TCG_REG_R7,
258 TCG_REG_R6,
259 TCG_REG_R14,
260 TCG_REG_R0,
261 TCG_REG_R1,
262 TCG_REG_R5,
263 TCG_REG_R4,
264 TCG_REG_R3,
265 TCG_REG_R2,
266};
267
268static const int tcg_target_call_iarg_regs[] = {
269 TCG_REG_R2,
270 TCG_REG_R3,
271 TCG_REG_R4,
272 TCG_REG_R5,
273 TCG_REG_R6,
274};
275
276static const int tcg_target_call_oarg_regs[] = {
277    TCG_REG_R2,
278};
279
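/* The S390_CC_* values below are the 4-bit mask consumed by BRC/BRCL and
   friends: 8 selects condition code 0, 4 selects CC 1, 2 selects CC 2 and
   1 selects CC 3.  For COMPARE, CC 0 means equal, CC 1 first operand low and
   CC 2 first operand high, so e.g. NE is the union of the LT and GT masks. */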
280#define S390_CC_EQ 8
281#define S390_CC_LT 4
282#define S390_CC_GT 2
283#define S390_CC_OV 1
284#define S390_CC_NE (S390_CC_LT | S390_CC_GT)
285#define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
286#define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
287#define S390_CC_NEVER 0
288#define S390_CC_ALWAYS 15
289
290/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
291static const uint8_t tcg_cond_to_s390_cond[] = {
292 [TCG_COND_EQ] = S390_CC_EQ,
293 [TCG_COND_NE] = S390_CC_NE,
294 [TCG_COND_LT] = S390_CC_LT,
295 [TCG_COND_LE] = S390_CC_LE,
296 [TCG_COND_GT] = S390_CC_GT,
297 [TCG_COND_GE] = S390_CC_GE,
298 [TCG_COND_LTU] = S390_CC_LT,
299 [TCG_COND_LEU] = S390_CC_LE,
300 [TCG_COND_GTU] = S390_CC_GT,
301 [TCG_COND_GEU] = S390_CC_GE,
302};
303
304/* Condition codes that result from a LOAD AND TEST. Here, we have no
305 unsigned instruction variation, however since the test is vs zero we
306 can re-map the outcomes appropriately. */
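/* E.g. an unsigned value is never below zero and always greater than or equal
   to zero, and unsigned <= 0 / > 0 degenerate to == 0 / != 0. */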
307static const uint8_t tcg_cond_to_ltr_cond[] = {
308 [TCG_COND_EQ] = S390_CC_EQ,
309 [TCG_COND_NE] = S390_CC_NE,
310 [TCG_COND_LT] = S390_CC_LT,
311 [TCG_COND_LE] = S390_CC_LE,
312 [TCG_COND_GT] = S390_CC_GT,
313 [TCG_COND_GE] = S390_CC_GE,
314 [TCG_COND_LTU] = S390_CC_NEVER,
315 [TCG_COND_LEU] = S390_CC_EQ,
316 [TCG_COND_GTU] = S390_CC_NE,
317 [TCG_COND_GEU] = S390_CC_ALWAYS,
318};
319
320#ifdef CONFIG_SOFTMMU
321/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
322 int mmu_idx) */
323static void * const qemu_ld_helpers[4] = {
324 helper_ldb_mmu,
325 helper_ldw_mmu,
326 helper_ldl_mmu,
327 helper_ldq_mmu,
328};
329
330/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
331 uintxx_t val, int mmu_idx) */
332static void * const qemu_st_helpers[4] = {
333 helper_stb_mmu,
334 helper_stw_mmu,
335 helper_stl_mmu,
336 helper_stq_mmu,
337};
338#endif
339
340static tcg_insn_unit *tb_ret_addr;
341
342/* A list of relevant facilities used by this translator. Some of these
343 are required for proper operation, and these are checked at startup. */
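/* The facility numbers follow the STORE FACILITY LIST convention, in which
   bit 0 is the most significant bit of the stored doubleword; hence the
   (63 - N) shifts below. */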
344
345#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
346#define FACILITY_LONG_DISP (1ULL << (63 - 18))
347#define FACILITY_EXT_IMM (1ULL << (63 - 21))
348#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
349#define FACILITY_LOAD_ON_COND   (1ULL << (63 - 45))
350
351static uint64_t facilities;
352
353static void patch_reloc(tcg_insn_unit *code_ptr, int type,
354                        intptr_t value, intptr_t addend)
355{
356 intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1);
357 assert(addend == -2);
358
359 switch (type) {
360 case R_390_PC16DBL:
361 assert(pcrel2 == (int16_t)pcrel2);
362        tcg_patch16(code_ptr, pcrel2);
363 break;
364 case R_390_PC32DBL:
365 assert(pcrel2 == (int32_t)pcrel2);
366        tcg_patch32(code_ptr, pcrel2);
367 break;
368 default:
369 tcg_abort();
370 break;
371 }
372}
373
374/* parse target specific constraints */
375static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
376{
377 const char *ct_str = *pct_str;
378
379 switch (ct_str[0]) {
380 case 'r': /* all registers */
381 ct->ct |= TCG_CT_REG;
382 tcg_regset_set32(ct->u.regs, 0, 0xffff);
383 break;
384 case 'R': /* not R0 */
385 ct->ct |= TCG_CT_REG;
386 tcg_regset_set32(ct->u.regs, 0, 0xffff);
387 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
388 break;
389 case 'L': /* qemu_ld/st constraint */
390 ct->ct |= TCG_CT_REG;
391 tcg_regset_set32(ct->u.regs, 0, 0xffff);
392 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
393 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
394        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
395 break;
396 case 'a': /* force R2 for division */
397 ct->ct |= TCG_CT_REG;
398 tcg_regset_clear(ct->u.regs);
399 tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
400 break;
401 case 'b': /* force R3 for division */
402 ct->ct |= TCG_CT_REG;
403 tcg_regset_clear(ct->u.regs);
404 tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
405 break;
406 case 'K':
407 ct->ct |= TCG_CT_CONST_MULI;
408 break;
409 case 'O':
410 ct->ct |= TCG_CT_CONST_ORI;
411 break;
412 case 'X':
413 ct->ct |= TCG_CT_CONST_XORI;
414 break;
415 case 'C':
416 ct->ct |= TCG_CT_CONST_CMPI;
417 break;
418 default:
419 return -1;
420 }
421 ct_str++;
422 *pct_str = ct_str;
423
424 return 0;
425}
426
427/* Immediates to be used with logical OR. This is an optimization only,
428 since a full 64-bit immediate OR can always be performed with 4 sequential
429 OI[LH][LH] instructions. What we're looking for is immediates that we
430 can load efficiently, and the immediate load plus the reg-reg OR is
431 smaller than the sequential OI's. */
432
433static int tcg_match_ori(TCGType type, tcg_target_long val)
434{
435 if (facilities & FACILITY_EXT_IMM) {
436        if (type == TCG_TYPE_I32) {
437 /* All 32-bit ORs can be performed with 1 48-bit insn. */
438 return 1;
439 }
440 }
441
442 /* Look for negative values. These are best to load with LGHI. */
443 if (val < 0) {
444 if (val == (int16_t)val) {
445 return 0;
446 }
447 if (facilities & FACILITY_EXT_IMM) {
448 if (val == (int32_t)val) {
449 return 0;
450 }
451 }
452 }
453
454 return 1;
455}
456
457/* Immediates to be used with logical XOR. This is almost, but not quite,
458 only an optimization. XOR with immediate is only supported with the
459 extended-immediate facility. That said, there are a few patterns for
460 which it is better to load the value into a register first. */
461
462static int tcg_match_xori(TCGType type, tcg_target_long val)
463{
464 if ((facilities & FACILITY_EXT_IMM) == 0) {
465 return 0;
466 }
467
468    if (type == TCG_TYPE_I32) {
469 /* All 32-bit XORs can be performed with 1 48-bit insn. */
470 return 1;
471 }
472
473 /* Look for negative values. These are best to load with LGHI. */
474 if (val < 0 && val == (int32_t)val) {
475 return 0;
476 }
477
478 return 1;
479}
480
481/* Immediates to be used with comparisons. */
482
483static int tcg_match_cmpi(TCGType type, tcg_target_long val)
484{
485 if (facilities & FACILITY_EXT_IMM) {
486 /* The COMPARE IMMEDIATE instruction is available. */
487        if (type == TCG_TYPE_I32) {
488 /* We have a 32-bit immediate and can compare against anything. */
489 return 1;
490 } else {
491 /* ??? We have no insight here into whether the comparison is
492 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
493 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
494 a 32-bit unsigned immediate. If we were to use the (semi)
495 obvious "val == (int32_t)val" we would be enabling unsigned
496 comparisons vs very large numbers. The only solution is to
497 take the intersection of the ranges. */
498 /* ??? Another possible solution is to simply lie and allow all
499 constants here and force the out-of-range values into a temp
500 register in tgen_cmp when we have knowledge of the actual
501 comparison code in use. */
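            /* The intersection of the signed and unsigned 32-bit immediate
               ranges is [0, 0x7fffffff]. */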
502 return val >= 0 && val <= 0x7fffffff;
503 }
504 } else {
505 /* Only the LOAD AND TEST instruction is available. */
506 return val == 0;
507 }
508}
509
510/* Test if a constant matches the constraint. */
511static int tcg_target_const_match(tcg_target_long val, TCGType type,
512                                  const TCGArgConstraint *arg_ct)
513{
514 int ct = arg_ct->ct;
515
516 if (ct & TCG_CT_CONST) {
517 return 1;
518 }
519
520    if (type == TCG_TYPE_I32) {
521 val = (int32_t)val;
522 }
523
524 /* The following are mutually exclusive. */
525    if (ct & TCG_CT_CONST_MULI) {
526 /* Immediates that may be used with multiply. If we have the
527 general-instruction-extensions, then we have MULTIPLY SINGLE
528 IMMEDIATE with a signed 32-bit, otherwise we have only
529 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
530 if (facilities & FACILITY_GEN_INST_EXT) {
531 return val == (int32_t)val;
532 } else {
533 return val == (int16_t)val;
534 }
535    } else if (ct & TCG_CT_CONST_ORI) {
536        return tcg_match_ori(type, val);
537    } else if (ct & TCG_CT_CONST_XORI) {
538        return tcg_match_xori(type, val);
539    } else if (ct & TCG_CT_CONST_CMPI) {
540        return tcg_match_cmpi(type, val);
541 }
542
543 return 0;
544}
545
546/* Emit instructions according to the given instruction format. */
547
548static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
549{
550 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
551}
552
553static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
554 TCGReg r1, TCGReg r2)
555{
556 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
557}
558
559static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
560 TCGReg r1, TCGReg r2, int m3)
561{
562 tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
563}
564
565static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
566{
567 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
568}
569
570static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
571{
572 tcg_out16(s, op | (r1 << 4));
573 tcg_out32(s, i2);
574}
575
576static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
577 TCGReg b2, TCGReg r3, int disp)
578{
579 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
580 | (disp & 0xfff));
581}
582
583static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
584 TCGReg b2, TCGReg r3, int disp)
585{
586 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
587 tcg_out32(s, (op & 0xff) | (b2 << 28)
588 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
589}
590
591#define tcg_out_insn_RX tcg_out_insn_RS
592#define tcg_out_insn_RXY tcg_out_insn_RSY
593
594/* Emit an opcode with "type-checking" of the format. */
595#define tcg_out_insn(S, FMT, OP, ...) \
596 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
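/* E.g. tcg_out_insn(s, RI, LGHI, ret, sval) expands to
   tcg_out_insn_RI(s, RI_LGHI, ret, sval). */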
597
598
599/* emit 64-bit shifts */
600static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
601 TCGReg src, TCGReg sh_reg, int sh_imm)
602{
603 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
604}
605
606/* emit 32-bit shifts */
607static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
608 TCGReg sh_reg, int sh_imm)
609{
610 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
611}
612
613static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
614{
615 if (src != dst) {
616 if (type == TCG_TYPE_I32) {
617 tcg_out_insn(s, RR, LR, dst, src);
618 } else {
619 tcg_out_insn(s, RRE, LGR, dst, src);
620 }
621 }
622}
623
624/* load a register with an immediate value */
625static void tcg_out_movi(TCGContext *s, TCGType type,
626 TCGReg ret, tcg_target_long sval)
627{
628 static const S390Opcode lli_insns[4] = {
629 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
630 };
631
632 tcg_target_ulong uval = sval;
633 int i;
634
635 if (type == TCG_TYPE_I32) {
636 uval = (uint32_t)sval;
637 sval = (int32_t)sval;
638 }
639
640 /* Try all 32-bit insns that can load it in one go. */
641 if (sval >= -0x8000 && sval < 0x8000) {
642 tcg_out_insn(s, RI, LGHI, ret, sval);
643 return;
644 }
645
646 for (i = 0; i < 4; i++) {
647 tcg_target_long mask = 0xffffull << i*16;
648 if ((uval & mask) == uval) {
649 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
650 return;
651 }
652 }
653
654 /* Try all 48-bit insns that can load it in one go. */
655 if (facilities & FACILITY_EXT_IMM) {
656 if (sval == (int32_t)sval) {
657 tcg_out_insn(s, RIL, LGFI, ret, sval);
658 return;
659 }
660 if (uval <= 0xffffffff) {
661 tcg_out_insn(s, RIL, LLILF, ret, uval);
662 return;
663 }
664 if ((uval & 0xffffffff) == 0) {
665 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
666 return;
667 }
668 }
669
670 /* Try for PC-relative address load. */
671 if ((sval & 1) == 0) {
672        ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
673 if (off == (int32_t)off) {
674 tcg_out_insn(s, RIL, LARL, ret, off);
675 return;
676 }
677 }
678
679 /* If extended immediates are not present, then we may have to issue
680 several instructions to load the low 32 bits. */
681 if (!(facilities & FACILITY_EXT_IMM)) {
682 /* A 32-bit unsigned value can be loaded in 2 insns. And given
683 that the lli_insns loop above did not succeed, we know that
684 both insns are required. */
685 if (uval <= 0xffffffff) {
686 tcg_out_insn(s, RI, LLILL, ret, uval);
687 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
688 return;
689 }
690
691 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
692 We first want to make sure that all the high bits get set. With
693 luck the low 16-bits can be considered negative to perform that for
694 free, otherwise we load an explicit -1. */
695 if (sval >> 31 >> 1 == -1) {
696 if (uval & 0x8000) {
697 tcg_out_insn(s, RI, LGHI, ret, uval);
698 } else {
699 tcg_out_insn(s, RI, LGHI, ret, -1);
700 tcg_out_insn(s, RI, IILL, ret, uval);
701 }
702 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
703 return;
704 }
705 }
706
707 /* If we get here, both the high and low parts have non-zero bits. */
708
709 /* Recurse to load the lower 32-bits. */
710    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);
711
712 /* Insert data into the high 32-bits. */
713 uval = uval >> 31 >> 1;
714 if (facilities & FACILITY_EXT_IMM) {
715 if (uval < 0x10000) {
716 tcg_out_insn(s, RI, IIHL, ret, uval);
717 } else if ((uval & 0xffff) == 0) {
718 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
719 } else {
720 tcg_out_insn(s, RIL, IIHF, ret, uval);
721 }
722 } else {
723 if (uval & 0xffff) {
724 tcg_out_insn(s, RI, IIHL, ret, uval);
725 }
726 if (uval & 0xffff0000) {
727 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
728 }
729 }
730}
731
732
733/* Emit a load/store type instruction. Inputs are:
734 DATA: The register to be loaded or stored.
735 BASE+OFS: The effective address.
736 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
737 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
738
739static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
740 TCGReg data, TCGReg base, TCGReg index,
741 tcg_target_long ofs)
742{
743 if (ofs < -0x80000 || ofs >= 0x80000) {
744 /* Combine the low 20 bits of the offset with the actual load insn;
745 the high 44 bits must come from an immediate load. */
746 tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
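        /* The xor/subtract sign-extends the low 20 bits, e.g. 0xfff00 becomes
           -0x100, so that LOW always fits the signed 20-bit displacement. */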
747 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
748 ofs = low;
749
750 /* If we were already given an index register, add it in. */
751 if (index != TCG_REG_NONE) {
752 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
753 }
754 index = TCG_TMP0;
755 }
756
757 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
758 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
759 } else {
760 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
761 }
762}
763
764
765/* load data without address translation or endianness conversion */
766static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
767                              TCGReg base, intptr_t ofs)
768{
769 if (type == TCG_TYPE_I32) {
770 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
771 } else {
772 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
773 }
774}
775
776static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
777                              TCGReg base, intptr_t ofs)
778{
779 if (type == TCG_TYPE_I32) {
780 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
781 } else {
782 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
783 }
784}
785
786/* load data from an absolute host address */
787static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
788{
789    intptr_t addr = (intptr_t)abs;
790
791 if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
792 ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
793 if (disp == (int32_t)disp) {
794 if (type == TCG_TYPE_I32) {
795 tcg_out_insn(s, RIL, LRL, dest, disp);
796 } else {
797 tcg_out_insn(s, RIL, LGRL, dest, disp);
798 }
799 return;
800 }
801 }
802
803 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
804 tcg_out_ld(s, type, dest, dest, addr & 0xffff);
805}
806
807static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
808 int msb, int lsb, int ofs, int z)
809{
810 /* Format RIE-f */
811 tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
812 tcg_out16(s, (msb << 8) | (z << 7) | lsb);
813 tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
814}
815
816static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
817{
818 if (facilities & FACILITY_EXT_IMM) {
819 tcg_out_insn(s, RRE, LGBR, dest, src);
820 return;
821 }
822
823 if (type == TCG_TYPE_I32) {
824 if (dest == src) {
825 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
826 } else {
827 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
828 }
829 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
830 } else {
831 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
832 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
833 }
834}
835
836static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
837{
838 if (facilities & FACILITY_EXT_IMM) {
839 tcg_out_insn(s, RRE, LLGCR, dest, src);
840 return;
841 }
842
843 if (dest == src) {
844 tcg_out_movi(s, type, TCG_TMP0, 0xff);
845 src = TCG_TMP0;
846 } else {
847 tcg_out_movi(s, type, dest, 0xff);
848 }
849 if (type == TCG_TYPE_I32) {
850 tcg_out_insn(s, RR, NR, dest, src);
851 } else {
852 tcg_out_insn(s, RRE, NGR, dest, src);
853 }
854}
855
856static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
857{
858 if (facilities & FACILITY_EXT_IMM) {
859 tcg_out_insn(s, RRE, LGHR, dest, src);
860 return;
861 }
862
863 if (type == TCG_TYPE_I32) {
864 if (dest == src) {
865 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
866 } else {
867 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
868 }
869 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
870 } else {
871 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
872 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
873 }
874}
875
876static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
877{
878 if (facilities & FACILITY_EXT_IMM) {
879 tcg_out_insn(s, RRE, LLGHR, dest, src);
880 return;
881 }
882
883 if (dest == src) {
884 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
885 src = TCG_TMP0;
886 } else {
887 tcg_out_movi(s, type, dest, 0xffff);
888 }
889 if (type == TCG_TYPE_I32) {
890 tcg_out_insn(s, RR, NR, dest, src);
891 } else {
892 tcg_out_insn(s, RRE, NGR, dest, src);
893 }
894}
895
896static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
897{
898 tcg_out_insn(s, RRE, LGFR, dest, src);
899}
900
901static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
902{
903 tcg_out_insn(s, RRE, LLGFR, dest, src);
904}
905
906/* Accept bit patterns like these:
907 0....01....1
908 1....10....0
909 1..10..01..1
910 0..01..10..0
911 Copied from gcc sources. */
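/* For example, 0x0ff0 and the wrapped mask 0xff000000000000ff are accepted,
   while 0xb (two separate runs of ones) is rejected. */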
912static inline bool risbg_mask(uint64_t c)
913{
914 uint64_t lsb;
915 /* We don't change the number of transitions by inverting,
916 so make sure we start with the LSB zero. */
917 if (c & 1) {
918 c = ~c;
919 }
920 /* Reject all zeros or all ones. */
921 if (c == 0) {
922 return false;
923 }
924 /* Find the first transition. */
925 lsb = c & -c;
926 /* Invert to look for a second transition. */
927 c = ~c;
928 /* Erase the first transition. */
929 c &= -lsb;
930 /* Find the second transition, if any. */
931 lsb = c & -c;
932 /* Match if all the bits are 1's, or if c is zero. */
933 return c == -lsb;
934}
935
936static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
937{
938 static const S390Opcode ni_insns[4] = {
939 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
940 };
941 static const S390Opcode nif_insns[2] = {
942 RIL_NILF, RIL_NIHF
943 };
944    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
945 int i;
946
947    /* Look for the zero-extensions. */
948    if ((val & valid) == 0xffffffff) {
949 tgen_ext32u(s, dest, dest);
950 return;
951 }
952    if (facilities & FACILITY_EXT_IMM) {
953        if ((val & valid) == 0xff) {
954 tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
955 return;
956 }
957        if ((val & valid) == 0xffff) {
958 tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
959 return;
960 }
961    }
962
963 /* Try all 32-bit insns that can perform it in one go. */
964 for (i = 0; i < 4; i++) {
965 tcg_target_ulong mask = ~(0xffffull << i*16);
966 if (((val | ~valid) & mask) == mask) {
967 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
968 return;
969        }
970    }
971
972 /* Try all 48-bit insns that can perform it in one go. */
973 if (facilities & FACILITY_EXT_IMM) {
974 for (i = 0; i < 2; i++) {
975 tcg_target_ulong mask = ~(0xffffffffull << i*32);
976 if (((val | ~valid) & mask) == mask) {
977 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
978 return;
979 }
980 }
981    }
982 if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
983 int msb, lsb;
984 if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
985 /* Achieve wraparound by swapping msb and lsb. */
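            /* E.g. val = 0xff000000000000ff: ~val = 0x00ffffffffffff00, so
               msb = 64 - 8 = 56 and lsb = 8 - 1 = 7, selecting bits 56..63
               wrapping around to bits 0..7. */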
986 msb = 64 - ctz64(~val);
987 lsb = clz64(~val) - 1;
988 } else {
989 msb = clz64(val);
990 lsb = 63 - ctz64(val);
991 }
992 tcg_out_risbg(s, dest, dest, msb, lsb, 0, 1);
993 return;
994 }
995
996 /* Fall back to loading the constant. */
997 tcg_out_movi(s, type, TCG_TMP0, val);
998 if (type == TCG_TYPE_I32) {
999 tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
1000    } else {
1001        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
1002 }
1003}
1004
1005static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1006{
1007 static const S390Opcode oi_insns[4] = {
1008 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1009 };
1010 static const S390Opcode nif_insns[2] = {
1011 RIL_OILF, RIL_OIHF
1012 };
1013
1014 int i;
1015
1016 /* Look for no-op. */
1017 if (val == 0) {
1018 return;
1019 }
1020
1021 if (facilities & FACILITY_EXT_IMM) {
1022 /* Try all 32-bit insns that can perform it in one go. */
1023 for (i = 0; i < 4; i++) {
1024 tcg_target_ulong mask = (0xffffull << i*16);
1025 if ((val & mask) != 0 && (val & ~mask) == 0) {
1026 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1027 return;
1028 }
1029 }
1030
1031 /* Try all 48-bit insns that can perform it in one go. */
1032 for (i = 0; i < 2; i++) {
1033 tcg_target_ulong mask = (0xffffffffull << i*32);
1034 if ((val & mask) != 0 && (val & ~mask) == 0) {
1035 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1036 return;
1037 }
1038 }
1039
1040 /* Perform the OR via sequential modifications to the high and
1041 low parts. Do this via recursion to handle 16-bit vs 32-bit
1042 masks in each half. */
1043 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1044 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1045 } else {
1046 /* With no extended-immediate facility, we don't need to be so
1047 clever. Just iterate over the insns and mask in the constant. */
1048 for (i = 0; i < 4; i++) {
1049 tcg_target_ulong mask = (0xffffull << i*16);
1050 if ((val & mask) != 0) {
1051 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1052 }
1053 }
1054 }
1055}
1056
1057static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1058{
1059 /* Perform the xor by parts. */
1060 if (val & 0xffffffff) {
1061 tcg_out_insn(s, RIL, XILF, dest, val);
1062 }
1063 if (val > 0xffffffff) {
1064 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1065 }
1066}
1067
1068static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1069 TCGArg c2, int c2const)
1070{
1071    bool is_unsigned = is_unsigned_cond(c);
1072 if (c2const) {
1073 if (c2 == 0) {
1074 if (type == TCG_TYPE_I32) {
1075 tcg_out_insn(s, RR, LTR, r1, r1);
1076 } else {
1077 tcg_out_insn(s, RRE, LTGR, r1, r1);
1078 }
1079 return tcg_cond_to_ltr_cond[c];
1080 } else {
1081 if (is_unsigned) {
1082 if (type == TCG_TYPE_I32) {
1083 tcg_out_insn(s, RIL, CLFI, r1, c2);
1084 } else {
1085 tcg_out_insn(s, RIL, CLGFI, r1, c2);
1086 }
1087 } else {
1088 if (type == TCG_TYPE_I32) {
1089 tcg_out_insn(s, RIL, CFI, r1, c2);
1090 } else {
1091 tcg_out_insn(s, RIL, CGFI, r1, c2);
1092 }
1093 }
1094 }
1095 } else {
1096 if (is_unsigned) {
1097 if (type == TCG_TYPE_I32) {
1098 tcg_out_insn(s, RR, CLR, r1, c2);
1099 } else {
1100 tcg_out_insn(s, RRE, CLGR, r1, c2);
1101 }
1102 } else {
1103 if (type == TCG_TYPE_I32) {
1104 tcg_out_insn(s, RR, CR, r1, c2);
1105 } else {
1106 tcg_out_insn(s, RRE, CGR, r1, c2);
1107 }
1108 }
1109 }
1110 return tcg_cond_to_s390_cond[c];
1111}
1112
1113static void tgen_setcond(TCGContext *s, TCGType type, TCGCond c,
1114                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
1115{
1116    int cc = tgen_cmp(s, type, c, c1, c2, c2const);
1117
1118 /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over: */
1119 tcg_out_movi(s, type, dest, 1);
1120 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1121 tcg_out_movi(s, type, dest, 0);
1122}
1123
1124static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1125 TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
1126{
1127 int cc;
1128 if (facilities & FACILITY_LOAD_ON_COND) {
1129 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1130 tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
1131 } else {
1132 c = tcg_invert_cond(c);
1133 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1134
1135 /* Emit: if (cc) goto over; dest = r3; over: */
1136 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1137 tcg_out_insn(s, RRE, LGR, dest, r3);
1138 }
1139}
1140
1141bool tcg_target_deposit_valid(int ofs, int len)
1142{
1143 return (facilities & FACILITY_GEN_INST_EXT) != 0;
1144}
1145
1146static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1147 int ofs, int len)
1148{
1149 int lsb = (63 - ofs);
1150 int msb = lsb - (len - 1);
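    /* E.g. ofs = 8, len = 16 gives lsb = 55, msb = 40: RISBG bits 40..55,
       i.e. target bits 8..23. */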
1151    tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
1152}
1153
1154static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
1155{
1156 ptrdiff_t off = dest - s->code_ptr;
1157 if (off == (int16_t)off) {
1158 tcg_out_insn(s, RI, BRC, cc, off);
1159 } else if (off == (int32_t)off) {
1160 tcg_out_insn(s, RIL, BRCL, cc, off);
1161 } else {
1162        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1163 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1164 }
1165}
1166
1167static void tgen_branch(TCGContext *s, int cc, int labelno)
1168{
1169 TCGLabel* l = &s->labels[labelno];
1170 if (l->has_value) {
1171        tgen_gotoi(s, cc, l->u.value_ptr);
1172 } else if (USE_LONG_BRANCHES) {
1173 tcg_out16(s, RIL_BRCL | (cc << 4));
1174 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2);
1175        s->code_ptr += 2;
1176 } else {
1177 tcg_out16(s, RI_BRC | (cc << 4));
1178 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2);
1179        s->code_ptr += 1;
1180 }
1181}
1182
1183static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
1184 TCGReg r1, TCGReg r2, int labelno)
1185{
1186 TCGLabel* l = &s->labels[labelno];
1187    intptr_t off;
1188
1189 if (l->has_value) {
1190        off = l->u.value_ptr - s->code_ptr;
1191 } else {
1192 /* We need to keep the offset unchanged for retranslation. */
1193 off = s->code_ptr[1];
1194 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, labelno, -2);
1195 }
1196
1197 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1198 tcg_out16(s, off);
1199 tcg_out16(s, cc << 12 | (opc & 0xff));
1200}
1201
1202static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
1203 TCGReg r1, int i2, int labelno)
1204{
1205 TCGLabel* l = &s->labels[labelno];
1206 tcg_target_long off;
1207
1208 if (l->has_value) {
1209        off = l->u.value_ptr - s->code_ptr;
1210 } else {
1211 /* We need to keep the offset unchanged for retranslation. */
1212 off = s->code_ptr[1];
1213 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, labelno, -2);
1214 }
1215
1216 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1217 tcg_out16(s, off);
1218 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1219}
1220
1221static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1222 TCGReg r1, TCGArg c2, int c2const, int labelno)
1223{
1224 int cc;
1225
1226 if (facilities & FACILITY_GEN_INST_EXT) {
1227        bool is_unsigned = is_unsigned_cond(c);
1228 bool in_range;
1229 S390Opcode opc;
1230
1231 cc = tcg_cond_to_s390_cond[c];
1232
1233 if (!c2const) {
1234 opc = (type == TCG_TYPE_I32
1235 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1236 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
1237 tgen_compare_branch(s, opc, cc, r1, c2, labelno);
1238 return;
1239 }
1240
1241 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1242 If the immediate we've been given does not fit that range, we'll
1243 fall back to separate compare and branch instructions using the
1244 larger comparison range afforded by COMPARE IMMEDIATE. */
1245 if (type == TCG_TYPE_I32) {
1246 if (is_unsigned) {
1247 opc = RIE_CLIJ;
1248 in_range = (uint32_t)c2 == (uint8_t)c2;
1249 } else {
1250 opc = RIE_CIJ;
1251 in_range = (int32_t)c2 == (int8_t)c2;
1252 }
1253 } else {
1254 if (is_unsigned) {
1255 opc = RIE_CLGIJ;
1256 in_range = (uint64_t)c2 == (uint8_t)c2;
1257 } else {
1258 opc = RIE_CGIJ;
1259 in_range = (int64_t)c2 == (int8_t)c2;
1260 }
1261 }
1262 if (in_range) {
1263 tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
1264 return;
1265 }
1266 }
1267
1268 cc = tgen_cmp(s, type, c, r1, c2, c2const);
1269 tgen_branch(s, cc, labelno);
1270}
1271
1272static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
1273{
1274    ptrdiff_t off = dest - s->code_ptr;
1275 if (off == (int32_t)off) {
1276 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1277 } else {
1278        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1279 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1280 }
1281}
1282
1283static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data,
1284 TCGReg base, TCGReg index, int disp)
1285{
1286#ifdef TARGET_WORDS_BIGENDIAN
1287 const int bswap = 0;
1288#else
1289 const int bswap = 1;
1290#endif
1291 switch (opc) {
1292 case LD_UINT8:
1293 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1294 break;
1295 case LD_INT8:
1296 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1297 break;
1298 case LD_UINT16:
1299 if (bswap) {
1300 /* swapped unsigned halfword load with upper bits zeroed */
1301 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1302 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1303 } else {
1304 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1305 }
1306 break;
1307 case LD_INT16:
1308 if (bswap) {
1309 /* swapped sign-extended halfword load */
1310 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1311 tgen_ext16s(s, TCG_TYPE_I64, data, data);
1312 } else {
1313 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1314 }
1315 break;
1316 case LD_UINT32:
1317 if (bswap) {
1318 /* swapped unsigned int load with upper bits zeroed */
1319 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1320 tgen_ext32u(s, data, data);
1321 } else {
1322 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1323 }
1324 break;
1325 case LD_INT32:
1326 if (bswap) {
1327 /* swapped sign-extended int load */
1328 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1329 tgen_ext32s(s, data, data);
1330 } else {
1331 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1332 }
1333 break;
1334 case LD_UINT64:
1335 if (bswap) {
1336 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
1337 } else {
1338 tcg_out_insn(s, RXY, LG, data, base, index, disp);
1339 }
1340 break;
1341 default:
1342 tcg_abort();
1343 }
1344}
1345
1346static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data,
1347 TCGReg base, TCGReg index, int disp)
1348{
1349#ifdef TARGET_WORDS_BIGENDIAN
1350 const int bswap = 0;
1351#else
1352 const int bswap = 1;
1353#endif
1354 switch (opc) {
1355 case LD_UINT8:
1356 if (disp >= 0 && disp < 0x1000) {
1357 tcg_out_insn(s, RX, STC, data, base, index, disp);
1358 } else {
1359 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1360 }
1361 break;
1362 case LD_UINT16:
1363 if (bswap) {
1364 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1365 } else if (disp >= 0 && disp < 0x1000) {
1366 tcg_out_insn(s, RX, STH, data, base, index, disp);
1367 } else {
1368 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1369 }
1370 break;
1371 case LD_UINT32:
1372 if (bswap) {
1373 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1374 } else if (disp >= 0 && disp < 0x1000) {
1375 tcg_out_insn(s, RX, ST, data, base, index, disp);
1376 } else {
1377 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1378 }
1379 break;
1380 case LD_UINT64:
1381 if (bswap) {
1382 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1383 } else {
1384 tcg_out_insn(s, RXY, STG, data, base, index, disp);
1385 }
1386 break;
1387 default:
1388 tcg_abort();
1389 }
1390}
1391
1392#if defined(CONFIG_SOFTMMU)
1393static TCGReg tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
1394 TCGReg addr_reg, int mem_index, int opc,
1395                                    tcg_insn_unit **label2_ptr_p, int is_store)
1396{
1397 const TCGReg arg0 = tcg_target_call_iarg_regs[0];
1398 const TCGReg arg1 = tcg_target_call_iarg_regs[1];
1399 const TCGReg arg2 = tcg_target_call_iarg_regs[2];
1400 const TCGReg arg3 = tcg_target_call_iarg_regs[3];
1401    int s_bits = opc & 3;
1402    tcg_insn_unit *label1_ptr;
1403 tcg_target_long ofs;
1404
1405 if (TARGET_LONG_BITS == 32) {
1406        tgen_ext32u(s, arg1, addr_reg);
1407    } else {
1408        tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
1409 }
1410
1411    tcg_out_sh64(s, RSY_SRLG, arg2, addr_reg, TCG_REG_NONE,
1412 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1413
1414 tgen_andi(s, TCG_TYPE_I64, arg1, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
1415 tgen_andi(s, TCG_TYPE_I64, arg2, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
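    /* arg1 now holds the page address plus the low alignment bits that must
       be zero for a hit; arg2 holds the byte offset of the matching TLB
       entry within tlb_table[mem_index]. */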
1416
1417 if (is_store) {
1418        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
1419    } else {
1420        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
1421 }
1422 assert(ofs < 0x80000);
1423
1424 if (TARGET_LONG_BITS == 32) {
1425        tcg_out_mem(s, RX_C, RXY_CY, arg1, arg2, TCG_AREG0, ofs);
1426    } else {
1427        tcg_out_mem(s, 0, RXY_CG, arg1, arg2, TCG_AREG0, ofs);
1428 }
1429
1430 if (TARGET_LONG_BITS == 32) {
1431        tgen_ext32u(s, arg1, addr_reg);
1432    } else {
1433        tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
1434 }
1435
1436    label1_ptr = s->code_ptr;
1437
1438 /* je label1 (offset will be patched in later) */
1439 tcg_out_insn(s, RI, BRC, S390_CC_EQ, 0);
1440
1441 /* call load/store helper */
1442 if (is_store) {
1443 /* Make sure to zero-extend the value to the full register
1444 for the calling convention. */
1445 switch (opc) {
1446 case LD_UINT8:
1447            tgen_ext8u(s, TCG_TYPE_I64, arg2, data_reg);
1448 break;
1449 case LD_UINT16:
1450            tgen_ext16u(s, TCG_TYPE_I64, arg2, data_reg);
1451 break;
1452 case LD_UINT32:
1453            tgen_ext32u(s, arg2, data_reg);
1454 break;
1455 case LD_UINT64:
1456            tcg_out_mov(s, TCG_TYPE_I64, arg2, data_reg);
1457 break;
1458 default:
1459 tcg_abort();
1460 }
1461 tcg_out_movi(s, TCG_TYPE_I32, arg3, mem_index);
1462 tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
1463        tcg_out_call(s, qemu_st_helpers[s_bits]);
1464    } else {
1465 tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
1466 tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
1467        tcg_out_call(s, qemu_ld_helpers[s_bits]);
1468
1469 /* sign extension */
1470 switch (opc) {
1471 case LD_INT8:
1472            tgen_ext8s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
1473 break;
1474 case LD_INT16:
1475            tgen_ext16s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
1476 break;
1477 case LD_INT32:
1478            tgen_ext32s(s, data_reg, TCG_REG_R2);
1479 break;
1480 default:
1481 /* unsigned -> just copy */
1482            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
1483 break;
1484 }
1485 }
1486
1487 /* jump to label2 (end) */
1488    *label2_ptr_p = s->code_ptr;
1489
1490 tcg_out_insn(s, RI, BRC, S390_CC_ALWAYS, 0);
1491
1492 /* this is label1, patch branch */
1493    label1_ptr[1] = s->code_ptr - label1_ptr;
1494
1495    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1496 assert(ofs < 0x80000);
1497
1498 tcg_out_mem(s, 0, RXY_AG, arg1, arg2, TCG_AREG0, ofs);
1499
1500 return arg1;
1501}
1502
1503static void tcg_finish_qemu_ldst(TCGContext* s, tcg_insn_unit *label2_ptr)
1504{
1505 /* patch branch */
1506    label2_ptr[1] = s->code_ptr - label2_ptr;
1507}
1508#else
1509static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1510 TCGReg *index_reg, tcg_target_long *disp)
1511{
1512 if (TARGET_LONG_BITS == 32) {
1513 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1514 *addr_reg = TCG_TMP0;
1515 }
1516 if (GUEST_BASE < 0x80000) {
1517 *index_reg = TCG_REG_NONE;
1518 *disp = GUEST_BASE;
1519 } else {
1520 *index_reg = TCG_GUEST_BASE_REG;
1521 *disp = 0;
1522 }
1523}
1524#endif /* CONFIG_SOFTMMU */
1525
1526/* load data with address translation (if applicable)
1527 and endianness conversion */
1528static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
1529{
1530 TCGReg addr_reg, data_reg;
1531#if defined(CONFIG_SOFTMMU)
1532 int mem_index;
1533    tcg_insn_unit *label2_ptr;
1534#else
1535 TCGReg index_reg;
1536 tcg_target_long disp;
1537#endif
1538
1539 data_reg = *args++;
1540 addr_reg = *args++;
1541
1542#if defined(CONFIG_SOFTMMU)
1543 mem_index = *args;
1544
1545 addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1546 opc, &label2_ptr, 0);
1547
1548    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);
1549
1550 tcg_finish_qemu_ldst(s, label2_ptr);
1551#else
1552 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1553 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1554#endif
1555}
1556
1557static void tcg_out_qemu_st(TCGContext* s, const TCGArg* args, int opc)
1558{
1559 TCGReg addr_reg, data_reg;
1560#if defined(CONFIG_SOFTMMU)
1561 int mem_index;
1562    tcg_insn_unit *label2_ptr;
1563#else
1564 TCGReg index_reg;
1565 tcg_target_long disp;
1566#endif
1567
1568 data_reg = *args++;
1569 addr_reg = *args++;
1570
1571#if defined(CONFIG_SOFTMMU)
1572 mem_index = *args;
1573
1574 addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1575 opc, &label2_ptr, 1);
1576
1577    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);
1578
1579 tcg_finish_qemu_ldst(s, label2_ptr);
1580#else
1581 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1582 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1583#endif
1584}
1585
1586# define OP_32_64(x) \
1587 case glue(glue(INDEX_op_,x),_i32): \
1588 case glue(glue(INDEX_op_,x),_i64)
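/* E.g. OP_32_64(ld8u) expands to: case INDEX_op_ld8u_i32: case INDEX_op_ld8u_i64 */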
1589
1590static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1591 const TCGArg *args, const int *const_args)
1592{
1593    S390Opcode op;
1594    TCGArg a0, a1, a2;
1595
1596 switch (opc) {
1597 case INDEX_op_exit_tb:
1598 /* return value */
1599 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
1600        tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1601 break;
1602
1603 case INDEX_op_goto_tb:
1604 if (s->tb_jmp_offset) {
1605 tcg_abort();
1606 } else {
1607 /* load address stored at s->tb_next + args[0] */
1608 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
1609 /* and go there */
1610 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1611 }
1612        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
1613 break;
1614
1615 OP_32_64(ld8u):
1616 /* ??? LLC (RXY format) is only present with the extended-immediate
1617 facility, whereas LLGC is always present. */
1618 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1619 break;
1620
1621 OP_32_64(ld8s):
1622 /* ??? LB is no smaller than LGB, so no point to using it. */
1623 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1624 break;
1625
1626 OP_32_64(ld16u):
1627 /* ??? LLH (RXY format) is only present with the extended-immediate
1628 facility, whereas LLGH is always present. */
1629 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1630 break;
1631
1632 case INDEX_op_ld16s_i32:
1633 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1634 break;
1635
1636 case INDEX_op_ld_i32:
1637 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1638 break;
1639
1640 OP_32_64(st8):
1641 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1642 TCG_REG_NONE, args[2]);
1643 break;
1644
1645 OP_32_64(st16):
1646 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1647 TCG_REG_NONE, args[2]);
1648 break;
1649
1650 case INDEX_op_st_i32:
1651 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1652 break;
1653
1654 case INDEX_op_add_i32:
1655        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1656        if (const_args[2]) {
1657 do_addi_32:
1658 if (a0 == a1) {
1659 if (a2 == (int16_t)a2) {
1660 tcg_out_insn(s, RI, AHI, a0, a2);
1661 break;
1662 }
1663 if (facilities & FACILITY_EXT_IMM) {
1664 tcg_out_insn(s, RIL, AFI, a0, a2);
1665 break;
1666 }
1667 }
1668 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1669 } else if (a0 == a1) {
1670 tcg_out_insn(s, RR, AR, a0, a2);
1671        } else {
1672            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1673 }
1674 break;
1675 case INDEX_op_sub_i32:
1676        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1677        if (const_args[2]) {
1678 a2 = -a2;
1679 goto do_addi_32;
1680        }
1681        tcg_out_insn(s, RR, SR, args[0], args[2]);
1682 break;
1683
1684 case INDEX_op_and_i32:
1685 if (const_args[2]) {
1686            tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
1687 } else {
1688 tcg_out_insn(s, RR, NR, args[0], args[2]);
1689 }
1690 break;
1691 case INDEX_op_or_i32:
1692 if (const_args[2]) {
1693 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1694 } else {
1695 tcg_out_insn(s, RR, OR, args[0], args[2]);
1696 }
1697 break;
1698 case INDEX_op_xor_i32:
1699 if (const_args[2]) {
1700 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1701 } else {
1702 tcg_out_insn(s, RR, XR, args[0], args[2]);
1703 }
1704 break;
1705
1706 case INDEX_op_neg_i32:
1707 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1708 break;
1709
1710 case INDEX_op_mul_i32:
1711 if (const_args[2]) {
1712 if ((int32_t)args[2] == (int16_t)args[2]) {
1713 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1714 } else {
1715 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1716 }
1717 } else {
1718 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1719 }
1720 break;
1721
1722 case INDEX_op_div2_i32:
1723 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1724 break;
1725 case INDEX_op_divu2_i32:
1726 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1727 break;
1728
1729 case INDEX_op_shl_i32:
1730 op = RS_SLL;
1731 do_shift32:
1732 if (const_args[2]) {
1733 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1734 } else {
1735 tcg_out_sh32(s, op, args[0], args[2], 0);
1736 }
1737 break;
1738 case INDEX_op_shr_i32:
1739 op = RS_SRL;
1740 goto do_shift32;
1741 case INDEX_op_sar_i32:
1742 op = RS_SRA;
1743 goto do_shift32;
1744
1745 case INDEX_op_rotl_i32:
1746 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1747 if (const_args[2]) {
1748 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1749 } else {
1750 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1751 }
1752 break;
1753 case INDEX_op_rotr_i32:
1754 if (const_args[2]) {
1755 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1756 TCG_REG_NONE, (32 - args[2]) & 31);
1757 } else {
1758 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1759 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1760 }
1761 break;
1762
1763 case INDEX_op_ext8s_i32:
1764 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1765 break;
1766 case INDEX_op_ext16s_i32:
1767 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1768 break;
1769 case INDEX_op_ext8u_i32:
1770 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1771 break;
1772 case INDEX_op_ext16u_i32:
1773 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1774 break;
1775
1776 OP_32_64(bswap16):
1777 /* The TCG bswap definition requires bits 0-47 already be zero.
1778 Thus we don't need the G-type insns to implement bswap16_i64. */
1779 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1780 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1781 break;
1782 OP_32_64(bswap32):
1783 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1784 break;
1785
1786 case INDEX_op_add2_i32:
1787 /* ??? Make use of ALFI. */
1788 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1789 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1790 break;
1791 case INDEX_op_sub2_i32:
1792 /* ??? Make use of SLFI. */
1793 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1794 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1795 break;
1796
1797 case INDEX_op_br:
1798 tgen_branch(s, S390_CC_ALWAYS, args[0]);
1799 break;
1800
1801 case INDEX_op_brcond_i32:
1802 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
1803 args[1], const_args[1], args[3]);
1804 break;
1805 case INDEX_op_setcond_i32:
1806 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1807 args[2], const_args[2]);
1808 break;
1809 case INDEX_op_movcond_i32:
1810 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
1811 args[2], const_args[2], args[3]);
1812 break;
1813
1814 case INDEX_op_qemu_ld8u:
1815 tcg_out_qemu_ld(s, args, LD_UINT8);
1816 break;
1817 case INDEX_op_qemu_ld8s:
1818 tcg_out_qemu_ld(s, args, LD_INT8);
1819 break;
1820 case INDEX_op_qemu_ld16u:
1821 tcg_out_qemu_ld(s, args, LD_UINT16);
1822 break;
1823 case INDEX_op_qemu_ld16s:
1824 tcg_out_qemu_ld(s, args, LD_INT16);
1825 break;
1826 case INDEX_op_qemu_ld32:
1827 /* ??? Technically we can use a non-extending instruction. */
1828 tcg_out_qemu_ld(s, args, LD_UINT32);
1829 break;
1830 case INDEX_op_qemu_ld64:
1831 tcg_out_qemu_ld(s, args, LD_UINT64);
1832 break;
1833
1834 case INDEX_op_qemu_st8:
1835 tcg_out_qemu_st(s, args, LD_UINT8);
1836 break;
1837 case INDEX_op_qemu_st16:
1838 tcg_out_qemu_st(s, args, LD_UINT16);
1839 break;
1840 case INDEX_op_qemu_st32:
1841 tcg_out_qemu_st(s, args, LD_UINT32);
1842 break;
1843 case INDEX_op_qemu_st64:
1844 tcg_out_qemu_st(s, args, LD_UINT64);
1845 break;
1846
1847 case INDEX_op_ld16s_i64:
1848 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1849 break;
1850 case INDEX_op_ld32u_i64:
1851 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1852 break;
1853 case INDEX_op_ld32s_i64:
1854 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1855 break;
1856 case INDEX_op_ld_i64:
1857 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1858 break;
1859
1860 case INDEX_op_st32_i64:
1861 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1862 break;
1863 case INDEX_op_st_i64:
1864 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1865 break;
1866
1867 case INDEX_op_add_i64:
1868        a0 = args[0], a1 = args[1], a2 = args[2];
1869        if (const_args[2]) {
1870 do_addi_64:
1871 if (a0 == a1) {
1872 if (a2 == (int16_t)a2) {
1873 tcg_out_insn(s, RI, AGHI, a0, a2);
1874 break;
1875 }
1876 if (facilities & FACILITY_EXT_IMM) {
1877 if (a2 == (int32_t)a2) {
1878 tcg_out_insn(s, RIL, AGFI, a0, a2);
1879 break;
1880 } else if (a2 == (uint32_t)a2) {
1881 tcg_out_insn(s, RIL, ALGFI, a0, a2);
1882 break;
1883 } else if (-a2 == (uint32_t)-a2) {
1884 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
1885 break;
1886 }
1887 }
1888 }
1889 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1890 } else if (a0 == a1) {
1891 tcg_out_insn(s, RRE, AGR, a0, a2);
1892 } else {
1893 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1894 }
1895 break;
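/* Rough summary of the immediate selection in the add_i64 case above,
   for the two-operand (a0 == a1) form: a constant fitting 16 signed
   bits uses AGHI; with the extended-immediate facility, a 32-bit
   signed constant uses AGFI, a 32-bit unsigned one ALGFI, and a
   constant whose negation fits 32 unsigned bits SLGFI; anything else,
   or a three-operand add, falls back to a load-address (LA/LAY)
   computation.  For instance, adding 100 picks AGHI, 0x12345678 picks
   AGFI, and -0xffff0000 is negated and emitted as SLGFI.  The sub_i64
   case below simply negates its constant and reuses this path. */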
1896 case INDEX_op_sub_i64:
1897 a0 = args[0], a1 = args[1], a2 = args[2];
1898 if (const_args[2]) {
1899 a2 = -a2;
1900 goto do_addi_64;
1901 } else {
1902 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
1903 }
1904 break;
1905
1906 case INDEX_op_and_i64:
1907 if (const_args[2]) {
1908 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
1909 } else {
1910 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
1911 }
1912 break;
1913 case INDEX_op_or_i64:
1914 if (const_args[2]) {
1915 tgen64_ori(s, args[0], args[2]);
1916 } else {
1917 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
1918 }
1919 break;
1920 case INDEX_op_xor_i64:
1921 if (const_args[2]) {
1922 tgen64_xori(s, args[0], args[2]);
1923 } else {
1924 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
1925 }
1926 break;
1927
1928 case INDEX_op_neg_i64:
1929 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
1930 break;
1931 case INDEX_op_bswap64_i64:
1932 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
1933 break;
1934
1935 case INDEX_op_mul_i64:
1936 if (const_args[2]) {
1937 if (args[2] == (int16_t)args[2]) {
1938 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
1939 } else {
1940 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
1941 }
1942 } else {
1943 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
1944 }
1945 break;
1946
1947 case INDEX_op_div2_i64:
1948 /* ??? We get an unnecessary sign-extension of the dividend
1949 into R3 with this definition, but since we do in fact always
1950 produce both quotient and remainder, using INDEX_op_div_i64
1951 instead would require jumping through even more hoops. */
1952 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
1953 break;
1954 case INDEX_op_divu2_i64:
1955 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
1956 break;
1957 case INDEX_op_mulu2_i64:
1958 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
1959 break;
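/* The divide and widening-multiply cases above rely on implicit
   even/odd register pairs centred on r2/r3: DSGR and DLGR leave the
   remainder in r2 and the quotient in r3, and MLGR leaves the high
   half of the 128-bit product in r2 and the low half in r3.  That is
   presumably why the operand constraints further down pin these
   outputs to the "a" (r2) and "b" (r3) letters. */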
1960
1961 case INDEX_op_shl_i64:
1962 op = RSY_SLLG;
1963 do_shift64:
1964 if (const_args[2]) {
1965 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
1966 } else {
1967 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
1968 }
1969 break;
1970 case INDEX_op_shr_i64:
1971 op = RSY_SRLG;
1972 goto do_shift64;
1973 case INDEX_op_sar_i64:
1974 op = RSY_SRAG;
1975 goto do_shift64;
1976
1977 case INDEX_op_rotl_i64:
1978 if (const_args[2]) {
1979 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
1980 TCG_REG_NONE, args[2]);
1981 } else {
1982 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
1983 }
1984 break;
1985 case INDEX_op_rotr_i64:
1986 if (const_args[2]) {
1987 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
1988 TCG_REG_NONE, (64 - args[2]) & 63);
1989 } else {
1990 /* We can use the smaller 32-bit negate because only the
1991 low 6 bits are examined for the rotate. */
1992 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1993 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
1994 }
1995 break;
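/* In the 64-bit rotate-right register case above, the 32-bit LCR
   negate is enough because, as the comment notes, only the low six
   bits of the count are examined: -n and (64 - n) agree in those six
   bits for any n. */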
1996
1997 case INDEX_op_ext8s_i64:
1998 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
1999 break;
2000 case INDEX_op_ext16s_i64:
2001 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2002 break;
2003 case INDEX_op_ext32s_i64:
2004 tgen_ext32s(s, args[0], args[1]);
2005 break;
2006 case INDEX_op_ext8u_i64:
2007 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2008 break;
2009 case INDEX_op_ext16u_i64:
2010 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2011 break;
2012 case INDEX_op_ext32u_i64:
2013 tgen_ext32u(s, args[0], args[1]);
2014 break;
2015
2016 case INDEX_op_add2_i64:
2017 /* ??? Make use of ALGFI and SLGFI. */
2018 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2019 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2020 break;
2021 case INDEX_op_sub2_i64:
2022 /* ??? Make use of ALGFI and SLGFI. */
2023 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2024 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2025 break;
2026
2027 case INDEX_op_brcond_i64:
2028 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2029 args[1], const_args[1], args[3]);
2030 break;
2031 case INDEX_op_setcond_i64:
2032 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2033 args[2], const_args[2]);
2034 break;
2035 case INDEX_op_movcond_i64:
2036 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2037 args[2], const_args[2], args[3]);
2038 break;
2039
2040 case INDEX_op_qemu_ld32u:
2041 tcg_out_qemu_ld(s, args, LD_UINT32);
2042 break;
2043 case INDEX_op_qemu_ld32s:
2044 tcg_out_qemu_ld(s, args, LD_INT32);
2045 break;
2046
2047 OP_32_64(deposit):
2048 tgen_deposit(s, args[0], args[2], args[3], args[4]);
2049 break;
2050
2051 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2052 case INDEX_op_mov_i64:
2053 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2054 case INDEX_op_movi_i64:
2055 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2056 default:
2057 tcg_abort();
2058 }
2059}
2060
2061static const TCGTargetOpDef s390_op_defs[] = {
2062 { INDEX_op_exit_tb, { } },
2063 { INDEX_op_goto_tb, { } },
2064 { INDEX_op_br, { } },
2065
2066 { INDEX_op_ld8u_i32, { "r", "r" } },
2067 { INDEX_op_ld8s_i32, { "r", "r" } },
2068 { INDEX_op_ld16u_i32, { "r", "r" } },
2069 { INDEX_op_ld16s_i32, { "r", "r" } },
2070 { INDEX_op_ld_i32, { "r", "r" } },
2071 { INDEX_op_st8_i32, { "r", "r" } },
2072 { INDEX_op_st16_i32, { "r", "r" } },
2073 { INDEX_op_st_i32, { "r", "r" } },
2074
2075 { INDEX_op_add_i32, { "r", "r", "ri" } },
2076 { INDEX_op_sub_i32, { "r", "0", "ri" } },
2077 { INDEX_op_mul_i32, { "r", "0", "rK" } },
2078
2079 { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
2080 { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
2081
2082 { INDEX_op_and_i32, { "r", "0", "ri" } },
2083 { INDEX_op_or_i32, { "r", "0", "rO" } },
2084 { INDEX_op_xor_i32, { "r", "0", "rX" } },
2085
2086 { INDEX_op_neg_i32, { "r", "r" } },
2087
2088 { INDEX_op_shl_i32, { "r", "0", "Ri" } },
2089 { INDEX_op_shr_i32, { "r", "0", "Ri" } },
2090 { INDEX_op_sar_i32, { "r", "0", "Ri" } },
2091
2092 { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
2093 { INDEX_op_rotr_i32, { "r", "r", "Ri" } },
2094
2095 { INDEX_op_ext8s_i32, { "r", "r" } },
2096 { INDEX_op_ext8u_i32, { "r", "r" } },
2097 { INDEX_op_ext16s_i32, { "r", "r" } },
2098 { INDEX_op_ext16u_i32, { "r", "r" } },
2099
2100 { INDEX_op_bswap16_i32, { "r", "r" } },
2101 { INDEX_op_bswap32_i32, { "r", "r" } },
2102
2103 { INDEX_op_add2_i32, { "r", "r", "0", "1", "r", "r" } },
2104 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "r", "r" } },
2105
2106 { INDEX_op_brcond_i32, { "r", "rC" } },
2107 { INDEX_op_setcond_i32, { "r", "r", "rC" } },
2108 { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
2109 { INDEX_op_deposit_i32, { "r", "0", "r" } },
2110
2111 { INDEX_op_qemu_ld8u, { "r", "L" } },
2112 { INDEX_op_qemu_ld8s, { "r", "L" } },
2113 { INDEX_op_qemu_ld16u, { "r", "L" } },
2114 { INDEX_op_qemu_ld16s, { "r", "L" } },
2115 { INDEX_op_qemu_ld32, { "r", "L" } },
2116 { INDEX_op_qemu_ld64, { "r", "L" } },
2117
2118 { INDEX_op_qemu_st8, { "L", "L" } },
2119 { INDEX_op_qemu_st16, { "L", "L" } },
2120 { INDEX_op_qemu_st32, { "L", "L" } },
2121 { INDEX_op_qemu_st64, { "L", "L" } },
2122
2123 { INDEX_op_ld8u_i64, { "r", "r" } },
2124 { INDEX_op_ld8s_i64, { "r", "r" } },
2125 { INDEX_op_ld16u_i64, { "r", "r" } },
2126 { INDEX_op_ld16s_i64, { "r", "r" } },
2127 { INDEX_op_ld32u_i64, { "r", "r" } },
2128 { INDEX_op_ld32s_i64, { "r", "r" } },
2129 { INDEX_op_ld_i64, { "r", "r" } },
2130
2131 { INDEX_op_st8_i64, { "r", "r" } },
2132 { INDEX_op_st16_i64, { "r", "r" } },
2133 { INDEX_op_st32_i64, { "r", "r" } },
2134 { INDEX_op_st_i64, { "r", "r" } },
2135
2136 { INDEX_op_add_i64, { "r", "r", "ri" } },
2137 { INDEX_op_sub_i64, { "r", "0", "ri" } },
2138 { INDEX_op_mul_i64, { "r", "0", "rK" } },
2139
2140 { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
2141 { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
2142 { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },
2143
2144 { INDEX_op_and_i64, { "r", "0", "ri" } },
2145 { INDEX_op_or_i64, { "r", "0", "rO" } },
2146 { INDEX_op_xor_i64, { "r", "0", "rX" } },
2147
2148 { INDEX_op_neg_i64, { "r", "r" } },
2149
2150 { INDEX_op_shl_i64, { "r", "r", "Ri" } },
2151 { INDEX_op_shr_i64, { "r", "r", "Ri" } },
2152 { INDEX_op_sar_i64, { "r", "r", "Ri" } },
2153
2154 { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
2155 { INDEX_op_rotr_i64, { "r", "r", "Ri" } },
2156
2157 { INDEX_op_ext8s_i64, { "r", "r" } },
2158 { INDEX_op_ext8u_i64, { "r", "r" } },
2159 { INDEX_op_ext16s_i64, { "r", "r" } },
2160 { INDEX_op_ext16u_i64, { "r", "r" } },
2161 { INDEX_op_ext32s_i64, { "r", "r" } },
2162 { INDEX_op_ext32u_i64, { "r", "r" } },
2163
2164 { INDEX_op_bswap16_i64, { "r", "r" } },
2165 { INDEX_op_bswap32_i64, { "r", "r" } },
2166 { INDEX_op_bswap64_i64, { "r", "r" } },
2167
2168 { INDEX_op_add2_i64, { "r", "r", "0", "1", "r", "r" } },
2169 { INDEX_op_sub2_i64, { "r", "r", "0", "1", "r", "r" } },
2170
2171 { INDEX_op_brcond_i64, { "r", "rC" } },
2172 { INDEX_op_setcond_i64, { "r", "r", "rC" } },
2173 { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
2174 { INDEX_op_deposit_i64, { "r", "0", "r" } },
2175
2176 { INDEX_op_qemu_ld32u, { "r", "L" } },
2177 { INDEX_op_qemu_ld32s, { "r", "L" } },
2178
2179 { -1 },
2180};
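/* A rough key to the single-letter constraints in the table above,
   assuming the usual TCG conventions plus this backend's own letters:
   "r" is any general register and "0"/"1" tie an input to the matching
   output; "a" and "b" appear to pin an operand to r2 and r3 for the
   instructions with implicit register pairs; "R" looks like a register
   class excluding r0 (a base/index field of zero means "no register");
   "L" appears to be the restricted class for qemu_ld/st operands so
   they do not collide with the softmmu helper-call registers; and the
   capitals K/O/X/C seem to mark constants acceptable to the multiply,
   or, xor and compare immediate forms. */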
2181
2182static void query_facilities(void)
2183{
2184 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2185
2186 /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
2187 is present on all 64-bit systems, but let's check for it anyway. */
2188 if (hwcap & HWCAP_S390_STFLE) {
2189 register int r0 __asm__("0");
2190 register void *r1 __asm__("1");
2191
2192 /* stfle 0(%r1) */
2193 r1 = &facilities;
2194 asm volatile(".word 0xb2b0,0x1000"
2195 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
2196 }
2197}
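/* On the inline assembly above: the .word pair hand-encodes
   "stfle 0(%r1)", presumably to avoid requiring assembler support for
   the mnemonic.  STFLE takes the number of doublewords to store, minus
   one, in r0 (zero here, via the "0"(0) input), stores that many
   facility-list doublewords at the address in r1, and updates r0 with
   the count the machine actually provides.  What lands in `facilities`
   is later tested with masks such as FACILITY_EXT_IMM. */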
2198
2199static void tcg_target_init(TCGContext *s)
2200 {
2201 query_facilities();
2202
2203 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2204 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2205
2206 tcg_regset_clear(tcg_target_call_clobber_regs);
2207 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2208 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2209 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2210 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2211 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2212 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
2213 /* The return-address register (r14) can be considered call-clobbered. */
2214 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2215
2216 tcg_regset_clear(s->reserved_regs);
2217 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2218 /* XXX many insns can't be used with R0, so we'd better avoid it for now */
2219 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2220 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2221
2222 tcg_add_target_add_op_defs(s390_op_defs);
2223}
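/* The clobber set above follows the s390x C ABI as this backend uses
   it: r0/r1 are scratch, r2-r5 carry call arguments (with r2 the
   return value), and r14 holds the return address, so all of these may
   be trashed by a helper call; r6-r13 and r15 are callee-saved and so
   are left out, even though r6 is also an argument register.  TCG_TMP0
   (r14), r0 and the stack pointer are additionally reserved so the
   register allocator never hands them out. */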
2224
48bb3750 2225static void tcg_target_qemu_prologue(TCGContext *s)
2827822e 2226{
a4924e8b
RH
2227 tcg_target_long frame_size;
2228
2229 /* stmg %r6,%r15,48(%r15) (save registers) */
2230 tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
2231
2232 /* aghi %r15,-frame_size */
2233 frame_size = TCG_TARGET_CALL_STACK_OFFSET;
2234 frame_size += TCG_STATIC_CALL_ARGS_SIZE;
2235 frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
2236 tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -frame_size);
2237
2238 tcg_set_frame(s, TCG_REG_CALL_STACK,
2239 TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
2240 CPU_TEMP_BUF_NLONGS * sizeof(long));
2241
2242 if (GUEST_BASE >= 0x80000) {
2243 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
2244 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2245 }
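/* The 0x80000 threshold above is presumably chosen because a guest
   base below 512KiB still fits the signed 20-bit displacement of the
   long-displacement (RXY) memory forms, so it can be folded into each
   guest access; only a larger base is worth a permanently reserved
   register. */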
2246
2247 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2248 /* br %r3 (go to TB) */
2249 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
2250
2251 tb_ret_addr = s->code_ptr;
2252
2253 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2254 tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
2255 frame_size + 48);
2256
2257 /* br %r14 (return) */
2258 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
2259 }
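/* Sketch of the frame handled above, under the usual s390x layout
   assumptions: STMG saves r6-r15 into the register save area at
   48(%r15) of the caller's standard 160-byte frame, AGHI then
   allocates frame_size bytes (the fixed call-frame area plus TCG's
   static call-args and temp buffer), and the epilogue's LMG reads the
   saved registers back from frame_size+48 relative to the new %r15.
   On entry the env pointer (first C argument) is moved into TCG_AREG0
   and control branches to the TB address in the second argument
   register (%r3); tb_ret_addr marks where exit_tb comes back before
   the final BR %r14 returns to the caller. */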