]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/s390/tcg-target.inc.c
tcg: Return success from patch_reloc
[mirror_qemu.git] / tcg / s390 / tcg-target.inc.c
CommitLineData
2827822e
AG
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
48bb3750
RH
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
2827822e
AG
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
a01fc30d
RH
27/* We only support generating code for 64-bit mode. */
28#if TCG_TARGET_REG_BITS != 64
29#error "unsupported code generation mode"
30#endif
31
28eef8aa 32#include "tcg-pool.inc.c"
c9baa30f
RH
33#include "elf.h"
34
48bb3750
RH
35/* ??? The translation blocks produced by TCG are generally small enough to
36 be entirely reachable with a 16-bit displacement. Leaving the option for
37 a 32-bit displacement here Just In Case. */
38#define USE_LONG_BRANCHES 0
39
a8f0269e
RH
40#define TCG_CT_CONST_S16 0x100
41#define TCG_CT_CONST_S32 0x200
a534bb15
RH
42#define TCG_CT_CONST_S33 0x400
43#define TCG_CT_CONST_ZERO 0x800
48bb3750
RH
44
45/* Several places within the instruction set 0 means "no register"
46 rather than TCG_REG_R0. */
47#define TCG_REG_NONE 0
48
49/* A scratch register that may be be used throughout the backend. */
ce411066 50#define TCG_TMP0 TCG_REG_R1
48bb3750 51
829e1376
RH
52/* A scratch register that holds a pointer to the beginning of the TB.
53 We don't need this when we have pc-relative loads with the general
54 instructions extension facility. */
55#define TCG_REG_TB TCG_REG_R12
56#define USE_REG_TB (!(s390_facilities & FACILITY_GEN_INST_EXT))
57
4cbea598 58#ifndef CONFIG_SOFTMMU
48bb3750 59#define TCG_GUEST_BASE_REG TCG_REG_R13
48bb3750
RH
60#endif
61
48bb3750
RH
/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B
   of the z/Architecture Principles of Operation.  */
typedef enum S390Opcode {
    /* RIL format: register + 32-bit immediate.  */
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_CLRL    = 0xc60f,
    RIL_CLGRL   = 0xc60a,
    RIL_CRL     = 0xc60d,
    RIL_CGRL    = 0xc608,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    /* RI format: register + 16-bit immediate.  */
    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_CHI      = 0xa70e,
    RI_CGHI     = 0xa70f,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,

    /* RIE format: compare-and-branch and related extended-immediate ops.  */
    RIE_CGIJ    = 0xec7c,
    RIE_CGRJ    = 0xec64,
    RIE_CIJ     = 0xec7e,
    RIE_CLGRJ   = 0xec65,
    RIE_CLIJ    = 0xec7f,
    RIE_CLGIJ   = 0xec7d,
    RIE_CLRJ    = 0xec77,
    RIE_CRJ     = 0xec76,
    RIE_LOCGHI  = 0xec46,
    RIE_RISBG   = 0xec55,

    /* RRE format: register-register, extended opcode.  */
    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_FLOGR   = 0xb983,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    /* RRF format: register-register with extra register/mask field.  */
    RRF_LOCR    = 0xb9f2,
    RRF_LOCGR   = 0xb9e2,
    RRF_NRK     = 0xb9f4,
    RRF_NGRK    = 0xb9e4,
    RRF_ORK     = 0xb9f6,
    RRF_OGRK    = 0xb9e6,
    RRF_SRK     = 0xb9f9,
    RRF_SGRK    = 0xb9e9,
    RRF_SLRK    = 0xb9fb,
    RRF_SLGRK   = 0xb9eb,
    RRF_XRK     = 0xb9f7,
    RRF_XGRK    = 0xb9e7,

    /* RR format: 2-byte register-register.  */
    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    /* RSY format: register-storage with 20-bit displacement.  */
    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SLLK    = 0xebdf,
    RSY_SRAG    = 0xeb0a,
    RSY_SRAK    = 0xebdc,
    RSY_SRLG    = 0xeb0c,
    RSY_SRLK    = 0xebde,

    /* RS format: register-storage with 12-bit displacement.  */
    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    /* RXY format: register-index-storage with 20-bit displacement.  */
    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CLG     = 0xe321,
    RXY_CLY     = 0xe355,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_NG      = 0xe380,
    RXY_OG      = 0xe381,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,
    RXY_XG      = 0xe382,

    /* RX format: register-index-storage with 12-bit displacement.  */
    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,

    NOP         = 0x0707,
} S390Opcode;
255
#ifdef CONFIG_DEBUG_TCG
/* Register names for debug dumps.  NB: every entry must be followed by a
   comma; the original text had "%r10" "%r11" ... "%r15" with the commas
   missing, which string-concatenated them into a single entry and left
   the tail of the 16-element array implicitly NULL.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
};
#endif
262
/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers.  */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation.  */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};
286
/* Integer argument registers, per the s390x ELF ABI (r2-r6).  */
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};
294
/* Integer return-value register (r2).  */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};
298
299#define S390_CC_EQ 8
300#define S390_CC_LT 4
301#define S390_CC_GT 2
302#define S390_CC_OV 1
303#define S390_CC_NE (S390_CC_LT | S390_CC_GT)
304#define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
305#define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
306#define S390_CC_NEVER 0
307#define S390_CC_ALWAYS 15
308
/* Condition codes that result from a COMPARE and COMPARE LOGICAL.
   The caller is expected to have chosen the signed vs logical compare
   insn; signed and unsigned TCG conditions therefore map identically.  */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};
322
/* Condition codes that result from a LOAD AND TEST.  Here, we have no
   unsigned instruction variation, however since the test is vs zero we
   can re-map the outcomes appropriately: e.g. "unsigned < 0" is never
   true and "unsigned >= 0" is always true.  */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};
338
#ifdef CONFIG_SOFTMMU
/* Softmmu slow-path load helpers, indexed by MemOp (size/sign/endianness).
   Unlisted MemOp combinations are intentionally NULL.  */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* Softmmu slow-path store helpers; stores need no signed variants.  */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
#endif
48bb3750 365
/* Address of the epilogue's TB-return point; presumably set when the
   prologue is emitted -- the assignment is not visible in this chunk.  */
static tcg_insn_unit *tb_ret_addr;

/* Bitmask of FACILITY_* bits describing the host CPU; tested throughout
   the backend (FACILITY_EXT_IMM, FACILITY_GEN_INST_EXT, ...).  */
uint64_t s390_facilities;
2827822e 368
/* Resolve a relocation recorded at CODE_PTR against target VALUE+ADDEND.
   Returns true: this backend always patches in place and never retries.  */
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t pcrel2;
    uint32_t old;

    value += addend;
    /* Displacement for the *DBL relocs, counted in tcg_insn_unit steps
       (2-byte halfwords on s390 -- confirm against tcg-target.h).  */
    pcrel2 = (tcg_insn_unit *)value - code_ptr;

    switch (type) {
    case R_390_PC16DBL:
        assert(pcrel2 == (int16_t)pcrel2);
        tcg_patch16(code_ptr, pcrel2);
        break;
    case R_390_PC32DBL:
        assert(pcrel2 == (int32_t)pcrel2);
        tcg_patch32(code_ptr, pcrel2);
        break;
    case R_390_20:
        /* RXY-style 20-bit signed displacement: the low 12 bits (DL) sit
           in insn bits 16..27, the high 8 bits (DH) in bits 28..35.  */
        assert(value == sextract64(value, 0, 20));
        old = *(uint32_t *)code_ptr & 0xf00000ff;
        old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
        tcg_patch32(code_ptr, old);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
398
/* Parse one target-specific operand constraint letter from CT_STR into CT.
   Returns the advanced string pointer, or NULL for an unknown letter.  */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':                  /* all registers */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        break;
    case 'L':                  /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        /* R2-R4 are clobbered by the softmmu slow path; exclude them.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        break;
    case 'a':                  /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b':                  /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'A':                  /* signed 33-bit immediate */
        ct->ct |= TCG_CT_CONST_S33;
        break;
    case 'I':                  /* signed 16-bit immediate */
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'J':                  /* signed 32-bit immediate */
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'Z':                  /* constant zero */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}
442
/* Test if a constant matches the constraint.  Returns nonzero on match.  */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    /* For 32-bit operands only the low 32 bits are significant;
       sign-extend so the range tests below behave uniformly.  */
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_S16) {
        return val == (int16_t)val;
    } else if (ct & TCG_CT_CONST_S32) {
        return val == (int32_t)val;
    } else if (ct & TCG_CT_CONST_S33) {
        return val >= -0xffffffffll && val <= 0xffffffffll;
    } else if (ct & TCG_CT_CONST_ZERO) {
        return val == 0;
    }

    return 0;
}
470
48bb3750
RH
/* Emit instructions according to the given instruction format. */

/* RR: 8-bit opcode, two 4-bit register fields; 2 bytes total.  */
static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}
477
/* RRE: 16-bit opcode, registers in the low byte; 4 bytes total.  */
static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}
483
96a9f093
RH
/* RRF: like RRE but with an extra 4-bit M3 (or R3) field.  */
static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}
489
48bb3750
RH
/* RI: opcode+register in the high half, 16-bit immediate in the low.  */
static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}
494
7af525af
RH
/* RIE: 6-byte insn with split opcode; 16-bit immediate and 4-bit mask.
   The two opcode bytes bracket the operands (see enum comment).  */
static void tcg_out_insn_RIE(TCGContext *s, S390Opcode op, TCGReg r1,
                             int i2, int m3)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
    tcg_out32(s, (i2 << 16) | (op & 0xff));
}
501
48bb3750
RH
/* RIL: 6-byte insn; register in the first halfword, 32-bit immediate.  */
static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}
507
/* RS: base+12-bit displacement with two register fields; 4 bytes.  */
static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}
514
/* RSY: 6-byte insn; 20-bit displacement split into DL (12) and DH (8).  */
static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}
522
523#define tcg_out_insn_RX tcg_out_insn_RS
524#define tcg_out_insn_RXY tcg_out_insn_RSY
525
526/* Emit an opcode with "type-checking" of the format. */
527#define tcg_out_insn(S, FMT, OP, ...) \
528 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
529
530
/* emit 64-bit shifts (RSY format: result may differ from source reg) */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}
537
/* emit 32-bit shifts (RS format: shifts DEST in place) */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}
544
545static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
546{
547 if (src != dst) {
548 if (type == TCG_TYPE_I32) {
549 tcg_out_insn(s, RR, LR, dst, src);
550 } else {
551 tcg_out_insn(s, RRE, LGR, dst, src);
552 }
553 }
554}
555
28eef8aa
RH
/* LOAD LOGICAL IMMEDIATE variants, one per 16-bit slice of the word.  */
static const S390Opcode lli_insns[4] = {
    RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
};

/* Try to load SVAL into RET with a single 4-byte instruction.
   Returns false (emitting nothing) if no such encoding exists.  */
static bool maybe_out_small_movi(TCGContext *s, TCGType type,
                                 TCGReg ret, tcg_target_long sval)
{
    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return true;
    }

    /* A value occupying a single 16-bit slice (all other bits zero).  */
    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return true;
        }
    }

    return false;
}
587
/* Load a register with an immediate value, trying progressively more
   expensive encodings: one 4-byte insn, one 6-byte insn, a pc-relative
   address, two insns, and finally the constant pool.  IN_PROLOGUE
   disables the TB-relative forms, which are invalid before the TB
   register is established.  */
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long sval, bool in_prologue)
{
    tcg_target_ulong uval;

    /* Try all 32-bit insns that can load it in one go.  */
    if (maybe_out_small_movi(s, type, ret, sval)) {
        return;
    }

    uval = sval;
    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (s390_facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 32);
            return;
        }
    }

    /* Try for PC-relative address load.  For odd addresses,
       attempt to use an offset from the start of the TB.  */
    if ((sval & 1) == 0) {
        ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    } else if (USE_REG_TB && !in_prologue) {
        ptrdiff_t off = sval - (uintptr_t)s->code_gen_ptr;
        if (off == sextract64(off, 0, 20)) {
            /* This is certain to be an address within TB, and therefore
               OFF will be negative; don't try RX_LA.  */
            tcg_out_insn(s, RXY, LAY, ret, TCG_REG_TB, TCG_REG_NONE, off);
            return;
        }
    }

    /* A 32-bit unsigned value can be loaded in 2 insns.  And given
       that LLILL, LLIHL, LLILF above did not succeed, we know that
       both insns are required.  */
    if (uval <= 0xffffffff) {
        tcg_out_insn(s, RI, LLILL, ret, uval);
        tcg_out_insn(s, RI, IILH, ret, uval >> 16);
        return;
    }

    /* Otherwise, stuff it in the constant pool.  */
    if (s390_facilities & FACILITY_GEN_INST_EXT) {
        tcg_out_insn(s, RIL, LGRL, ret, 0);
        new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
    } else if (USE_REG_TB && !in_prologue) {
        tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0);
        new_pool_label(s, sval, R_390_20, s->code_ptr - 2,
                       -(intptr_t)s->code_gen_ptr);
    } else {
        /* Materialize the pool address with LARL; reuse RET as the base
           unless it is R0, which cannot serve as a base register.  */
        TCGReg base = ret ? ret : TCG_TMP0;
        tcg_out_insn(s, RIL, LARL, base, 0);
        new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
        tcg_out_insn(s, RXY, LG, ret, base, TCG_REG_NONE, 0);
    }
}
663
829e1376
RH
/* Public movi entry point: never called from within the prologue.  */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    tcg_out_movi_int(s, type, ret, sval, false);
}
48bb3750
RH
669
/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   If the operation has an RX format opcode (e.g. STC),
             otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */
static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    /* Prefer the short RX form when the displacement fits in 12 bits.  */
    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
700
48bb3750 701
2827822e 702/* load data without address translation or endianness conversion */
48bb3750 703static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 704 TCGReg base, intptr_t ofs)
2827822e 705{
48bb3750
RH
706 if (type == TCG_TYPE_I32) {
707 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
708 } else {
709 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
710 }
2827822e
AG
711}
712
48bb3750 713static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 714 TCGReg base, intptr_t ofs)
2827822e 715{
48bb3750
RH
716 if (type == TCG_TYPE_I32) {
717 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
718 } else {
719 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
720 }
721}
722
59d7c14e
RH
/* Always declines to store a constant directly to memory; the generic
   code presumably materializes the value into a register first.  */
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}
728
48bb3750
RH
/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    intptr_t addr = (intptr_t)abs;

    /* Prefer a single pc-relative load; LRL/LGRL require an even
       address and a displacement that fits in 32 bits of halfwords.  */
    if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
        ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }
    /* Next, try addressing relative to the start of the TB.  */
    if (USE_REG_TB) {
        ptrdiff_t disp = abs - (void *)s->code_gen_ptr;
        if (disp == sextract64(disp, 0, 20)) {
            tcg_out_ld(s, type, dest, TCG_REG_TB, disp);
            return;
        }
    }

    /* Fallback: build the high bits in DEST, then load with the rest.  */
    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}
756
f0bffc27
RH
/* Emit RISBG (rotate then insert selected bits): rotate SRC left by OFS
   and insert bits MSB..LSB into DEST; Z set clears the remaining bits.  */
static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}
765
48bb3750
RH
/* Sign-extend the low 8 bits of SRC into DEST; without the
   extended-immediate facility, fall back to a shift pair.  */
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (s390_facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}
785
/* Zero-extend the low 8 bits of SRC into DEST; without the
   extended-immediate facility, AND with a 0xff mask instead.  */
static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (s390_facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    /* Place the mask in whichever register is not the source.  */
    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}
805
/* Sign-extend the low 16 bits of SRC into DEST; without the
   extended-immediate facility, fall back to a shift pair.  */
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (s390_facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}
825
/* Zero-extend the low 16 bits of SRC into DEST; without the
   extended-immediate facility, AND with a 0xffff mask instead.  */
static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (s390_facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    /* Place the mask in whichever register is not the source.  */
    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}
845
/* Sign-extend the low 32 bits of SRC into DEST.  */
static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}
850
/* Zero-extend the low 32 bits of SRC into DEST.  */
static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}
855
f0bffc27
RH
/* Accept masks that consist of a single contiguous run of 1's, possibly
   wrapping around the word boundary:
       0....01....1
       1....10....0
       1..10..01..1
       0..01..10..0
   (Logic mirrors GCC's s390 backend.)  */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t t;

    /* Inverting preserves the number of 0->1 transitions; normalize
       so that the least significant bit is clear.  */
    if (c & 1) {
        c = ~c;
    }
    /* An all-zero (or originally all-one) mask has no run at all.  */
    if (c == 0) {
        return false;
    }
    /* Isolate the lowest set bit: the position of the first transition.  */
    t = c & -c;
    /* Flip and discard everything below that transition, leaving only
       what sits above the first run of 1's.  */
    c = ~c & -t;
    /* Match iff no bits remain, or the remaining bits form a single
       run of 1's reaching the top of the word (c == -lowbit).  */
    t = c & -c;
    return c + t == 0;
}
885
547ec121
RH
/* AND with a contiguous (possibly wrapping) mask VAL via RISBG,
   keeping bits msb..lsb of IN and zeroing the rest (z = 1).  */
static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb.  */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}
899
/* AND the 64-bit immediate VAL into DEST in place, choosing the cheapest
   encoding: a zero-extension, a single 16/32-bit mask insn, RISBG, or
   finally a full materialization of the constant.  */
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    /* Bits that matter for this operand size; for I32 the high half of
       VAL is ignored throughout via (val | ~valid).  */
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }
    if (s390_facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (s390_facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }
    /* Contiguous masks can be done with a single rotate-and-insert.  */
    if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    /* Use the constant pool if USE_REG_TB, but not for small constants.  */
    if (USE_REG_TB) {
        if (!maybe_out_small_movi(s, type, TCG_TMP0, val)) {
            tcg_out_insn(s, RXY, NG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
            new_pool_label(s, val & valid, R_390_20, s->code_ptr - 2,
                           -(intptr_t)s->code_gen_ptr);
            return;
        }
    } else {
        tcg_out_movi(s, type, TCG_TMP0, val);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}
968
/* OR the 64-bit immediate VAL into DEST in place, preferring a single
   16/32-bit immediate insn, then a register OR, then the constant pool.  */
static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode oi_insns[4] = {
        RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
    };
    static const S390Opcode oif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    /* Look for no-op.  */
    if (unlikely(val == 0)) {
        return;
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = (0xffffull << i*16);
        if ((val & mask) != 0 && (val & ~mask) == 0) {
            tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (s390_facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = (0xffffffffull << i*32);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }

    /* Use the constant pool if USE_REG_TB, but not for small constants.  */
    if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, OR, dest, TCG_TMP0);
        } else {
            tcg_out_insn(s, RRE, OGR, dest, TCG_TMP0);
        }
    } else if (USE_REG_TB) {
        tcg_out_insn(s, RXY, OG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
        new_pool_label(s, val, R_390_20, s->code_ptr - 2,
                       -(intptr_t)s->code_gen_ptr);
    } else {
        /* Perform the OR via sequential modifications to the high and
           low parts.  Do this via recursion to handle 16-bit vs 32-bit
           masks in each half.  */
        tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM);
        tgen_ori(s, type, dest, val & 0x00000000ffffffffull);
        tgen_ori(s, type, dest, val & 0xffffffff00000000ull);
    }
}
1025
/* XOR the 64-bit immediate VAL into DEST in place; XILF/XIHF each cover
   one 32-bit half, so at most two immediate insns are ever needed.  */
static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    /* Try all 48-bit insns that can perform it in one go.  */
    if (s390_facilities & FACILITY_EXT_IMM) {
        if ((val & 0xffffffff00000000ull) == 0) {
            tcg_out_insn(s, RIL, XILF, dest, val);
            return;
        }
        if ((val & 0x00000000ffffffffull) == 0) {
            tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
            return;
        }
    }

    /* Use the constant pool if USE_REG_TB, but not for small constants.  */
    if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, XR, dest, TCG_TMP0);
        } else {
            tcg_out_insn(s, RRE, XGR, dest, TCG_TMP0);
        }
    } else if (USE_REG_TB) {
        tcg_out_insn(s, RXY, XG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
        new_pool_label(s, val, R_390_20, s->code_ptr - 2,
                       -(intptr_t)s->code_gen_ptr);
    } else {
        /* Perform the xor by parts.  */
        tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM);
        if (val & 0xffffffff) {
            tcg_out_insn(s, RIL, XILF, dest, val);
        }
        if (val > 0xffffffff) {
            tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
        }
    }
}
1062
/* Emit a compare of R1 against C2 (a register, or a constant when
   C2CONST), choosing among compare-with-zero, 16/32-bit immediate
   compares, constant-pool compares, and register-register compares.
   NEED_CARRY means the caller will consume the condition code as a
   carry/borrow (via ALCGR/SLBGR), which rules out the LTR shortcut for
   unsigned conditions.  Returns the s390 condition-code mask to branch
   or load-on-condition with.  */
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, bool c2const, bool need_carry)
{
    bool is_unsigned = is_unsigned_cond(c);
    S390Opcode op;

    if (c2const) {
        if (c2 == 0) {
            /* Compare against zero via LOAD AND TEST, which sets the CC
               for signed comparison; unusable when the caller needs the
               carry-style CC of an unsigned compare.  */
            if (!(is_unsigned && need_carry)) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RR, LTR, r1, r1);
                } else {
                    tcg_out_insn(s, RRE, LTGR, r1, r1);
                }
                /* LTR-style CC needs its own condition mapping.  */
                return tcg_cond_to_ltr_cond[c];
            }
        }

        /* Signed 16-bit immediate: COMPARE HALFWORD IMMEDIATE.  */
        if (!is_unsigned && c2 == (int16_t)c2) {
            op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI);
            tcg_out_insn_RI(s, op, r1, c2);
            goto exit;
        }

        /* 32-bit immediates, given the extended-immediate facility.  */
        if (s390_facilities & FACILITY_EXT_IMM) {
            if (type == TCG_TYPE_I32) {
                op = (is_unsigned ? RIL_CLFI : RIL_CFI);
                tcg_out_insn_RIL(s, op, r1, c2);
                goto exit;
            } else if (c2 == (is_unsigned ? (uint32_t)c2 : (int32_t)c2)) {
                /* 64-bit compare whose constant fits in 32 bits with the
                   appropriate extension.  */
                op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
                tcg_out_insn_RIL(s, op, r1, c2);
                goto exit;
            }
        }

        /* Use the constant pool, but not for small constants.  */
        if (maybe_out_small_movi(s, type, TCG_TMP0, c2)) {
            c2 = TCG_TMP0;
            /* fall through to reg-reg */
        } else if (USE_REG_TB) {
            /* Compare against a pool entry addressed off TCG_REG_TB.  */
            if (type == TCG_TYPE_I32) {
                op = (is_unsigned ? RXY_CLY : RXY_CY);
                tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
                new_pool_label(s, (uint32_t)c2, R_390_20, s->code_ptr - 2,
                               4 - (intptr_t)s->code_gen_ptr);
            } else {
                op = (is_unsigned ? RXY_CLG : RXY_CG);
                tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
                new_pool_label(s, c2, R_390_20, s->code_ptr - 2,
                               -(intptr_t)s->code_gen_ptr);
            }
            goto exit;
        } else {
            /* Compare against a pool entry via PC-relative addressing.  */
            if (type == TCG_TYPE_I32) {
                op = (is_unsigned ? RIL_CLRL : RIL_CRL);
                tcg_out_insn_RIL(s, op, r1, 0);
                new_pool_label(s, (uint32_t)c2, R_390_PC32DBL,
                               s->code_ptr - 2, 2 + 4);
            } else {
                op = (is_unsigned ? RIL_CLGRL : RIL_CGRL);
                tcg_out_insn_RIL(s, op, r1, 0);
                new_pool_label(s, c2, R_390_PC32DBL, s->code_ptr - 2, 2);
            }
            goto exit;
        }
    }

    /* Register-register compare; c2 is a register here (possibly
       TCG_TMP0 from the small-movi path above).  */
    if (type == TCG_TYPE_I32) {
        op = (is_unsigned ? RR_CLR : RR_CR);
        tcg_out_insn_RR(s, op, r1, c2);
    } else {
        op = (is_unsigned ? RRE_CLGR : RRE_CGR);
        tcg_out_insn_RRE(s, op, r1, c2);
    }

 exit:
    return tcg_cond_to_s390_cond[c];
}
1142
/* Emit code for dest = (c1 COND c2) ? 1 : 0.  Prefers LOAD-ON-CONDITION
   when available; otherwise exploits the carry/borrow behaviour of
   ALCGR/SLBGR for the GT/GTU/LE/LEU family, canonicalizing other
   conditions toward that family by operand swap.  */
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc;
    bool have_loc;

    /* With LOC2, we can always emit the minimum 3 insns.  */
    if (s390_facilities & FACILITY_LOAD_ON_COND2) {
        /* Emit: d = 0, d = (cc ? 1 : d).  */
        cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_insn(s, RIE, LOCGHI, dest, 1, cc);
        return;
    }

    have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0;

    /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller.  */
 restart:
    switch (cond) {
    case TCG_COND_NE:
        /* X != 0 is X > 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_GTU:
    case TCG_COND_GT:
        /* The result of a compare has CC=2 for GT and CC=3 unused.
           ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.  */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_LEU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_LEU:
    case TCG_COND_LE:
        /* As above, but we're looking for borrow, or !carry.
           The second insn computes d - d - borrow, or -1 for true
           and 0 for false.  So we must mask to 1 bit afterward.  */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_insn(s, RRE, SLBGR, dest, dest);
        tgen_andi(s, type, dest, 1);
        return;

    case TCG_COND_GEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GE:
        /* Swap operands so that we can use LEU/GTU/GT/LE.  */
        if (c2const) {
            /* With LOC the generic tail below is already optimal for a
               constant operand; don't bother materializing it.  */
            if (have_loc) {
                break;
            }
            /* Materialize the constant so the operands can be swapped.  */
            tcg_out_movi(s, type, TCG_TMP0, c2);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
        } else {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
        }
        cond = tcg_swap_cond(cond);
        goto restart;

    default:
        g_assert_not_reached();
    }

    /* Generic tail: compare, then select 0/1 by condition code.  */
    cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
    if (have_loc) {
        /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
        tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
    } else {
        /* Emit: d = 1; if (cc) goto over; d = 0; over:  */
        tcg_out_movi(s, type, dest, 1);
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_movi(s, type, dest, 0);
    }
}
1238
/* Emit code for dest = (c1 COND c2) ? v3 : dest — a conditional move.
   With LOAD-ON-CONDITION this is a single LOCGHI/LOCGR after the
   compare; otherwise a branch-over sequence with the condition
   inverted.  V3 is an immediate when V3CONST, else a register.  */
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const,
                         TCGArg v3, int v3const)
{
    int cc;
    if (s390_facilities & FACILITY_LOAD_ON_COND) {
        cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
        if (v3const) {
            /* Load immediate v3 into dest when cc holds.  */
            tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc);
        } else {
            /* Load register v3 into dest when cc holds.  */
            tcg_out_insn(s, RRF, LOCGR, dest, v3, cc);
        }
    } else {
        /* Invert the condition so the branch skips the move when the
           original condition is false.  */
        c = tcg_invert_cond(c);
        cc = tgen_cmp(s, type, c, c1, c2, c2const, false);

        /* Emit: if (cc) goto over; dest = r3; over:  */
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_insn(s, RRE, LGR, dest, v3);
    }
}
1260
ce411066
RH
/* Emit code for dest = clz(a1), with A2 (register, or constant when
   A2CONST) as the result when a1 == 0 — matching the TCG clz
   "fallback value" semantics.  */
static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
                     TCGArg a2, int a2const)
{
    /* Since this sets both R and R+1, we have no choice but to store the
       result into R0, allowing R1 == TCG_TMP0 to be clobbered as well.  */
    QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
    tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);

    if (a2const && a2 == 64) {
        /* FLOGR's own zero-input result is what the caller asked for;
           just copy it out.  */
        tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
    } else {
        /* Start with the fallback value in dest...  */
        if (a2const) {
            tcg_out_movi(s, TCG_TYPE_I64, dest, a2);
        } else {
            tcg_out_mov(s, TCG_TYPE_I64, dest, a2);
        }
        /* ...then overwrite it with R0 unless the input was zero.  */
        if (s390_facilities & FACILITY_LOAD_ON_COND) {
            /* Emit: if (one bit found) dest = r0.  */
            tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2);
        } else {
            /* Emit: if (no one bit found) goto over; dest = r0; over:  */
            tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1);
            tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0);
        }
    }
}
1287
d5690ea4 1288static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
752b1be9 1289 int ofs, int len, int z)
d5690ea4
RH
1290{
1291 int lsb = (63 - ofs);
1292 int msb = lsb - (len - 1);
752b1be9 1293 tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
d5690ea4
RH
1294}
1295
b0bf5fe8
RH
/* Emit code for dest = extract(src, ofs, len): rotate the field down to
   bit 0 and zero all higher bits, via a single RISBG.  */
static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
}
1301
8c081b18 1302static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
48bb3750 1303{
8c081b18
RH
1304 ptrdiff_t off = dest - s->code_ptr;
1305 if (off == (int16_t)off) {
48bb3750
RH
1306 tcg_out_insn(s, RI, BRC, cc, off);
1307 } else if (off == (int32_t)off) {
1308 tcg_out_insn(s, RIL, BRCL, cc, off);
1309 } else {
8c081b18 1310 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
48bb3750
RH
1311 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1312 }
1313}
1314
/* Emit a conditional branch to label L.  A resolved label branches
   directly; an unresolved one emits the opcode halfword and records a
   relocation over the displacement field, advancing code_ptr past it.  */
static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else if (USE_LONG_BRANCHES) {
        /* BRCL with a 32-bit displacement to be patched later.  */
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, 2);
        s->code_ptr += 2;
    } else {
        /* BRC with a 16-bit displacement to be patched later.  */
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
        s->code_ptr += 1;
    }
}
1329
/* Emit a fused compare-and-branch (RIE format, e.g. CRJ/CGRJ) of R1
   against register R2, branching to L on condition CC.  An unresolved
   label leaves a zero displacement and records a 16-bit relocation over
   the second halfword.  */
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    intptr_t off = 0;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* s->code_ptr + 1 is the displacement halfword.  */
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    }

    /* RIE layout: opcode-high | r1 | r2, 16-bit displacement,
       condition mask | opcode-low.  */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}
1345
/* Emit a fused compare-immediate-and-branch (RIE format, e.g. CIJ/CGIJ)
   of R1 against the 8-bit immediate I2, branching to L on condition CC.
   An unresolved label leaves a zero displacement and records a 16-bit
   relocation over the second halfword.  */
static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_target_long off = 0;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* s->code_ptr + 1 is the displacement halfword.  */
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    }

    /* RIE layout: opcode-high | r1 | condition mask, 16-bit displacement,
       8-bit immediate | opcode-low.  */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}
1361
/* Emit a conditional branch to L based on comparing R1 with C2
   (register, or constant when C2CONST).  Prefers the fused
   compare-and-branch insns of the general-instruction-extension
   facility; falls back to a separate tgen_cmp + branch when the
   facility is absent or the immediate does not fit.  */
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;

    if (s390_facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            /* Register operand: fused compare-and-branch always works.  */
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, l);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE.  */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
            return;
        }
    }

    /* Fallback: explicit compare, then conditional branch.  */
    cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
    tgen_branch(s, cc, l);
}
1412
a8111212 1413static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
48bb3750 1414{
8c081b18 1415 ptrdiff_t off = dest - s->code_ptr;
48bb3750
RH
1416 if (off == (int32_t)off) {
1417 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1418 } else {
8c081b18 1419 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
48bb3750
RH
1420 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1421 }
1422}
1423
/* Emit the actual guest load into DATA from base+index+disp, selecting
   the insn by access size, signedness and byte-swap requirement.
   Byte-swapped sub-64-bit loads use LRVH/LRV followed by an explicit
   extension, since the reversed-load insns do not extend.  */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case MO_SB:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;

    case MO_UW | MO_BSWAP:
        /* swapped unsigned halfword load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16u(s, TCG_TYPE_I64, data, data);
        break;
    case MO_UW:
        tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        break;

    case MO_SW | MO_BSWAP:
        /* swapped sign-extended halfword load */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16s(s, TCG_TYPE_I64, data, data);
        break;
    case MO_SW:
        tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        break;

    case MO_UL | MO_BSWAP:
        /* swapped unsigned int load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32u(s, data, data);
        break;
    case MO_UL:
        tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        break;

    case MO_SL | MO_BSWAP:
        /* swapped sign-extended int load */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32s(s, data, data);
        break;
    case MO_SL:
        tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, LG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}
1482
/* Emit the actual guest store of DATA to base+index+disp, selecting the
   insn by access size and byte-swap requirement.  Non-swapped stores
   use the short RX form when the displacement fits in 12 unsigned bits,
   else the long-displacement RXY form.  */
static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_UB:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;

    case MO_UW | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        break;
    case MO_UW:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;

    case MO_UL | MO_BSWAP:
        tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        break;
    case MO_UL:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, STG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}
1528
1529#if defined(CONFIG_SOFTMMU)
659ef5cb
RH
1530#include "tcg-ldst.inc.c"
1531
fb596415
RH
1532/* We're expecting to use a 20-bit signed offset on the tlb memory ops.
1533 Using the offset of the second entry in the last tlb table ensures
1534 that we can index all of the elements of the first entry. */
1535QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
1536 > 0x7ffff);
1537
1538/* Load and compare a TLB entry, leaving the flags set. Loads the TLB
1539 addend into R2. Returns a register with the santitized guest address. */
/* Load and compare a TLB entry, leaving the flags set.  Loads the TLB
   addend into R2.  Returns a register with the santitized guest address.

   R2 is built up as the byte offset of the TLB entry within env, and
   R3 as the masked page/alignment bits of the guest address for the
   tag comparison.  */
static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                               int mem_index, bool is_ld)
{
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    unsigned s_mask = (1 << s_bits) - 1;
    unsigned a_mask = (1 << a_bits) - 1;
    int ofs, a_off;
    uint64_t tlb_mask;

    /* For aligned accesses, we check the first byte and include the alignment
       bits within the address.  For unaligned access, we check that we don't
       cross pages using the address of the last byte of the access.  */
    a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
    tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;

    if (s390_facilities & FACILITY_GEN_INST_EXT) {
        /* Extract the TLB index bits and scale by the entry size in one
           RISBG, placing the entry offset in R2.  */
        tcg_out_risbg(s, TCG_REG_R2, addr_reg,
                      64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
                      63 - CPU_TLB_ENTRY_BITS,
                      64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
        if (a_off) {
            /* Bias by a_off before masking, to test the access's last byte. */
            tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
            tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
        } else {
            tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
        }
    } else {
        /* Without RISBG: shift and mask to form the entry offset in R2,
           and separately mask the comparison address into R3.  */
        tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
        tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
                  (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
    }

    /* Compare R3 against the cached tag for a read or a write access.  */
    if (is_ld) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    }
    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    }

    /* Load the host-address addend for this entry into R2.  */
    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);

    /* 32-bit guests need the address zero-extended for host addressing.  */
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_REG_R3, addr_reg);
        return TCG_REG_R3;
    }
    return addr_reg;
}
48bb3750 1596
3972ef6f
RH
/* Record a slow-path entry for a guest load (IS_LD) or store, so the
   out-of-line code can be generated later: the registers involved, the
   oi (memop + mmu index), the return address RADDR, and LABEL_PTR, the
   branch displacement to patch when the slow path is emitted.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg data, TCGReg addr,
                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = data;
    label->addrlo_reg = addr;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}
48bb3750 1610
fb596415
RH
/* Emit the out-of-line slow path for a guest load: patch the fast
   path's conditional branch to land here, call the appropriate
   qemu_ld helper per the s390 calling convention (env in R2, address
   in R3, oi in R4, return address in R5), copy the result from R2 to
   the destination register, and jump back to the fast path.  */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    /* Resolve the forward branch recorded by tcg_out_qemu_ld.  */
    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}
1631
/* Emit the out-of-line slow path for a guest store: patch the fast
   path's conditional branch to land here, extend the data to the width
   expected by the helper (env in R2, address in R3, data in R4, oi in
   R5, return address in R6), call it, and jump back.  */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    /* Resolve the forward branch recorded by tcg_out_qemu_st.  */
    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    /* Zero-extend the data to the helper's argument width.  */
    switch (opc & MO_SIZE) {
    case MO_UB:
        tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UW:
        tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UL:
        tgen_ext32u(s, TCG_REG_R4, data_reg);
        break;
    case MO_Q:
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    default:
        tcg_abort();
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}
1667#else
/* User-mode (non-softmmu) address setup: zero-extend a 32-bit guest
   address, and return either a small immediate displacement for
   guest_base or the dedicated guest-base register as the index.  */
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    /* guest_base below 2^19 fits the signed 20-bit RXY displacement.  */
    if (guest_base < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = guest_base;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
1683#endif /* CONFIG_SOFTMMU */
1684
/* Emit a guest load.  Softmmu: probe the TLB, emit a conditional
   branch (displacement patched later by the slow path) taken on TLB
   miss, then the fast-path load via the addend in R2.  User mode:
   compute the host address directly and load.  */
static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);

    /* BRC on TLB-compare mismatch; displacement filled in when the
       slow path is emitted.  */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
1711
/* Emit a guest store; mirrors tcg_out_qemu_ld with the store fast path
   and is_ld = 0 for the TLB probe and slow-path record.  */
static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);

    /* BRC on TLB-compare mismatch; displacement filled in when the
       slow path is emitted.  */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
1738
48bb3750
RH
1739# define OP_32_64(x) \
1740 case glue(glue(INDEX_op_,x),_i32): \
1741 case glue(glue(INDEX_op_,x),_i64)
48bb3750 1742
a9751609 1743static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2827822e
AG
1744 const TCGArg *args, const int *const_args)
1745{
c2097136 1746 S390Opcode op, op2;
0db921e6 1747 TCGArg a0, a1, a2;
48bb3750
RH
1748
1749 switch (opc) {
1750 case INDEX_op_exit_tb:
46644483
RH
1751 /* Reuse the zeroing that exists for goto_ptr. */
1752 a0 = args[0];
1753 if (a0 == 0) {
1754 tgen_gotoi(s, S390_CC_ALWAYS, s->code_gen_epilogue);
1755 } else {
1756 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
1757 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1758 }
48bb3750
RH
1759 break;
1760
1761 case INDEX_op_goto_tb:
829e1376 1762 a0 = args[0];
f309101c 1763 if (s->tb_jmp_insn_offset) {
ed3d51ec
SF
1764 /* branch displacement must be aligned for atomic patching;
1765 * see if we need to add extra nop before branch
1766 */
1767 if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1768 tcg_out16(s, NOP);
1769 }
829e1376 1770 tcg_debug_assert(!USE_REG_TB);
a10c64e0 1771 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
829e1376 1772 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
a10c64e0 1773 s->code_ptr += 2;
48bb3750 1774 } else {
829e1376
RH
1775 /* load address stored at s->tb_jmp_target_addr + a0 */
1776 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB,
1777 s->tb_jmp_target_addr + a0);
48bb3750 1778 /* and go there */
829e1376
RH
1779 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
1780 }
9f754620 1781 set_jmp_reset_offset(s, a0);
829e1376
RH
1782
1783 /* For the unlinked path of goto_tb, we need to reset
1784 TCG_REG_TB to the beginning of this TB. */
1785 if (USE_REG_TB) {
1786 int ofs = -tcg_current_code_size(s);
1787 assert(ofs == (int16_t)ofs);
1788 tcg_out_insn(s, RI, AGHI, TCG_REG_TB, ofs);
48bb3750 1789 }
48bb3750
RH
1790 break;
1791
46644483 1792 case INDEX_op_goto_ptr:
829e1376
RH
1793 a0 = args[0];
1794 if (USE_REG_TB) {
1795 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
1796 }
1797 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
46644483
RH
1798 break;
1799
48bb3750
RH
1800 OP_32_64(ld8u):
1801 /* ??? LLC (RXY format) is only present with the extended-immediate
1802 facility, whereas LLGC is always present. */
1803 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1804 break;
1805
1806 OP_32_64(ld8s):
1807 /* ??? LB is no smaller than LGB, so no point to using it. */
1808 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1809 break;
1810
1811 OP_32_64(ld16u):
1812 /* ??? LLH (RXY format) is only present with the extended-immediate
1813 facility, whereas LLGH is always present. */
1814 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1815 break;
1816
1817 case INDEX_op_ld16s_i32:
1818 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1819 break;
1820
1821 case INDEX_op_ld_i32:
1822 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1823 break;
1824
1825 OP_32_64(st8):
1826 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1827 TCG_REG_NONE, args[2]);
1828 break;
1829
1830 OP_32_64(st16):
1831 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1832 TCG_REG_NONE, args[2]);
1833 break;
1834
1835 case INDEX_op_st_i32:
1836 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1837 break;
1838
1839 case INDEX_op_add_i32:
0db921e6 1840 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1841 if (const_args[2]) {
0db921e6
RH
1842 do_addi_32:
1843 if (a0 == a1) {
1844 if (a2 == (int16_t)a2) {
1845 tcg_out_insn(s, RI, AHI, a0, a2);
1846 break;
1847 }
b2c98d9d 1848 if (s390_facilities & FACILITY_EXT_IMM) {
0db921e6
RH
1849 tcg_out_insn(s, RIL, AFI, a0, a2);
1850 break;
1851 }
1852 }
1853 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1854 } else if (a0 == a1) {
1855 tcg_out_insn(s, RR, AR, a0, a2);
48bb3750 1856 } else {
0db921e6 1857 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
1858 }
1859 break;
1860 case INDEX_op_sub_i32:
0db921e6 1861 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1862 if (const_args[2]) {
0db921e6
RH
1863 a2 = -a2;
1864 goto do_addi_32;
c2097136
RH
1865 } else if (a0 == a1) {
1866 tcg_out_insn(s, RR, SR, a0, a2);
1867 } else {
1868 tcg_out_insn(s, RRF, SRK, a0, a1, a2);
48bb3750
RH
1869 }
1870 break;
1871
1872 case INDEX_op_and_i32:
c2097136 1873 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1874 if (const_args[2]) {
c2097136
RH
1875 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1876 tgen_andi(s, TCG_TYPE_I32, a0, a2);
1877 } else if (a0 == a1) {
1878 tcg_out_insn(s, RR, NR, a0, a2);
48bb3750 1879 } else {
c2097136 1880 tcg_out_insn(s, RRF, NRK, a0, a1, a2);
48bb3750
RH
1881 }
1882 break;
1883 case INDEX_op_or_i32:
c2097136 1884 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1885 if (const_args[2]) {
c2097136 1886 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
4046d9ca 1887 tgen_ori(s, TCG_TYPE_I32, a0, a2);
c2097136
RH
1888 } else if (a0 == a1) {
1889 tcg_out_insn(s, RR, OR, a0, a2);
48bb3750 1890 } else {
c2097136 1891 tcg_out_insn(s, RRF, ORK, a0, a1, a2);
48bb3750
RH
1892 }
1893 break;
1894 case INDEX_op_xor_i32:
c2097136 1895 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1896 if (const_args[2]) {
c2097136 1897 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
5bf67a92 1898 tgen_xori(s, TCG_TYPE_I32, a0, a2);
c2097136 1899 } else if (a0 == a1) {
48bb3750 1900 tcg_out_insn(s, RR, XR, args[0], args[2]);
c2097136
RH
1901 } else {
1902 tcg_out_insn(s, RRF, XRK, a0, a1, a2);
48bb3750
RH
1903 }
1904 break;
1905
1906 case INDEX_op_neg_i32:
1907 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1908 break;
1909
1910 case INDEX_op_mul_i32:
1911 if (const_args[2]) {
1912 if ((int32_t)args[2] == (int16_t)args[2]) {
1913 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1914 } else {
1915 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1916 }
1917 } else {
1918 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1919 }
1920 break;
1921
1922 case INDEX_op_div2_i32:
1923 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1924 break;
1925 case INDEX_op_divu2_i32:
1926 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1927 break;
1928
1929 case INDEX_op_shl_i32:
1930 op = RS_SLL;
c2097136 1931 op2 = RSY_SLLK;
48bb3750 1932 do_shift32:
c2097136
RH
1933 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1934 if (a0 == a1) {
1935 if (const_args[2]) {
1936 tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
1937 } else {
1938 tcg_out_sh32(s, op, a0, a2, 0);
1939 }
48bb3750 1940 } else {
c2097136
RH
1941 /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
1942 if (const_args[2]) {
1943 tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
1944 } else {
1945 tcg_out_sh64(s, op2, a0, a1, a2, 0);
1946 }
48bb3750
RH
1947 }
1948 break;
1949 case INDEX_op_shr_i32:
1950 op = RS_SRL;
c2097136 1951 op2 = RSY_SRLK;
48bb3750
RH
1952 goto do_shift32;
1953 case INDEX_op_sar_i32:
1954 op = RS_SRA;
c2097136 1955 op2 = RSY_SRAK;
48bb3750
RH
1956 goto do_shift32;
1957
1958 case INDEX_op_rotl_i32:
1959 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1960 if (const_args[2]) {
1961 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1962 } else {
1963 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1964 }
1965 break;
1966 case INDEX_op_rotr_i32:
1967 if (const_args[2]) {
1968 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1969 TCG_REG_NONE, (32 - args[2]) & 31);
1970 } else {
1971 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1972 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1973 }
1974 break;
1975
1976 case INDEX_op_ext8s_i32:
1977 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1978 break;
1979 case INDEX_op_ext16s_i32:
1980 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1981 break;
1982 case INDEX_op_ext8u_i32:
1983 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1984 break;
1985 case INDEX_op_ext16u_i32:
1986 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1987 break;
1988
1989 OP_32_64(bswap16):
1990 /* The TCG bswap definition requires bits 0-47 already be zero.
1991 Thus we don't need the G-type insns to implement bswap16_i64. */
1992 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1993 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1994 break;
1995 OP_32_64(bswap32):
1996 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1997 break;
1998
3790b918 1999 case INDEX_op_add2_i32:
ad19b358
RH
2000 if (const_args[4]) {
2001 tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
2002 } else {
2003 tcg_out_insn(s, RR, ALR, args[0], args[4]);
2004 }
3790b918
RH
2005 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
2006 break;
2007 case INDEX_op_sub2_i32:
ad19b358
RH
2008 if (const_args[4]) {
2009 tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
2010 } else {
2011 tcg_out_insn(s, RR, SLR, args[0], args[4]);
2012 }
3790b918
RH
2013 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
2014 break;
2015
48bb3750 2016 case INDEX_op_br:
bec16311 2017 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
48bb3750
RH
2018 break;
2019
2020 case INDEX_op_brcond_i32:
2021 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
bec16311 2022 args[1], const_args[1], arg_label(args[3]));
48bb3750
RH
2023 break;
2024 case INDEX_op_setcond_i32:
2025 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
2026 args[2], const_args[2]);
2027 break;
96a9f093
RH
2028 case INDEX_op_movcond_i32:
2029 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
7af525af 2030 args[2], const_args[2], args[3], const_args[3]);
96a9f093 2031 break;
48bb3750 2032
f24efee4 2033 case INDEX_op_qemu_ld_i32:
48bb3750 2034 /* ??? Technically we can use a non-extending instruction. */
f24efee4 2035 case INDEX_op_qemu_ld_i64:
59227d5d 2036 tcg_out_qemu_ld(s, args[0], args[1], args[2]);
48bb3750 2037 break;
f24efee4
RH
2038 case INDEX_op_qemu_st_i32:
2039 case INDEX_op_qemu_st_i64:
59227d5d 2040 tcg_out_qemu_st(s, args[0], args[1], args[2]);
48bb3750
RH
2041 break;
2042
48bb3750
RH
2043 case INDEX_op_ld16s_i64:
2044 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
2045 break;
2046 case INDEX_op_ld32u_i64:
2047 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
2048 break;
2049 case INDEX_op_ld32s_i64:
2050 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
2051 break;
2052 case INDEX_op_ld_i64:
2053 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2054 break;
2055
2056 case INDEX_op_st32_i64:
2057 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2058 break;
2059 case INDEX_op_st_i64:
2060 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2061 break;
2062
2063 case INDEX_op_add_i64:
0db921e6 2064 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2065 if (const_args[2]) {
0db921e6
RH
2066 do_addi_64:
2067 if (a0 == a1) {
2068 if (a2 == (int16_t)a2) {
2069 tcg_out_insn(s, RI, AGHI, a0, a2);
2070 break;
2071 }
b2c98d9d 2072 if (s390_facilities & FACILITY_EXT_IMM) {
0db921e6
RH
2073 if (a2 == (int32_t)a2) {
2074 tcg_out_insn(s, RIL, AGFI, a0, a2);
2075 break;
2076 } else if (a2 == (uint32_t)a2) {
2077 tcg_out_insn(s, RIL, ALGFI, a0, a2);
2078 break;
2079 } else if (-a2 == (uint32_t)-a2) {
2080 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
2081 break;
2082 }
2083 }
2084 }
2085 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2086 } else if (a0 == a1) {
2087 tcg_out_insn(s, RRE, AGR, a0, a2);
48bb3750 2088 } else {
0db921e6 2089 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
2090 }
2091 break;
2092 case INDEX_op_sub_i64:
0db921e6 2093 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2094 if (const_args[2]) {
0db921e6
RH
2095 a2 = -a2;
2096 goto do_addi_64;
c2097136
RH
2097 } else if (a0 == a1) {
2098 tcg_out_insn(s, RRE, SGR, a0, a2);
48bb3750 2099 } else {
c2097136 2100 tcg_out_insn(s, RRF, SGRK, a0, a1, a2);
48bb3750
RH
2101 }
2102 break;
2103
2104 case INDEX_op_and_i64:
c2097136 2105 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2106 if (const_args[2]) {
c2097136 2107 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
07ff7983 2108 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
c2097136 2109 } else if (a0 == a1) {
48bb3750 2110 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
c2097136
RH
2111 } else {
2112 tcg_out_insn(s, RRF, NGRK, a0, a1, a2);
48bb3750
RH
2113 }
2114 break;
2115 case INDEX_op_or_i64:
c2097136 2116 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2117 if (const_args[2]) {
c2097136 2118 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
4046d9ca 2119 tgen_ori(s, TCG_TYPE_I64, a0, a2);
c2097136
RH
2120 } else if (a0 == a1) {
2121 tcg_out_insn(s, RRE, OGR, a0, a2);
48bb3750 2122 } else {
c2097136 2123 tcg_out_insn(s, RRF, OGRK, a0, a1, a2);
48bb3750
RH
2124 }
2125 break;
2126 case INDEX_op_xor_i64:
c2097136 2127 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2128 if (const_args[2]) {
c2097136 2129 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
5bf67a92 2130 tgen_xori(s, TCG_TYPE_I64, a0, a2);
c2097136
RH
2131 } else if (a0 == a1) {
2132 tcg_out_insn(s, RRE, XGR, a0, a2);
48bb3750 2133 } else {
c2097136 2134 tcg_out_insn(s, RRF, XGRK, a0, a1, a2);
48bb3750
RH
2135 }
2136 break;
2137
2138 case INDEX_op_neg_i64:
2139 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2140 break;
2141 case INDEX_op_bswap64_i64:
2142 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2143 break;
2144
2145 case INDEX_op_mul_i64:
2146 if (const_args[2]) {
2147 if (args[2] == (int16_t)args[2]) {
2148 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
2149 } else {
2150 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
2151 }
2152 } else {
2153 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
2154 }
2155 break;
2156
2157 case INDEX_op_div2_i64:
2158 /* ??? We get an unnecessary sign-extension of the dividend
2159 into R3 with this definition, but as we do in fact always
2160 produce both quotient and remainder using INDEX_op_div_i64
2161 instead requires jumping through even more hoops. */
2162 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
2163 break;
2164 case INDEX_op_divu2_i64:
2165 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
2166 break;
36017dc6
RH
2167 case INDEX_op_mulu2_i64:
2168 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
2169 break;
48bb3750
RH
2170
2171 case INDEX_op_shl_i64:
2172 op = RSY_SLLG;
2173 do_shift64:
2174 if (const_args[2]) {
2175 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2176 } else {
2177 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2178 }
2179 break;
2180 case INDEX_op_shr_i64:
2181 op = RSY_SRLG;
2182 goto do_shift64;
2183 case INDEX_op_sar_i64:
2184 op = RSY_SRAG;
2185 goto do_shift64;
2186
2187 case INDEX_op_rotl_i64:
2188 if (const_args[2]) {
2189 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2190 TCG_REG_NONE, args[2]);
2191 } else {
2192 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2193 }
2194 break;
2195 case INDEX_op_rotr_i64:
2196 if (const_args[2]) {
2197 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2198 TCG_REG_NONE, (64 - args[2]) & 63);
2199 } else {
2200 /* We can use the smaller 32-bit negate because only the
2201 low 6 bits are examined for the rotate. */
2202 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2203 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2204 }
2205 break;
2206
2207 case INDEX_op_ext8s_i64:
2208 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2209 break;
2210 case INDEX_op_ext16s_i64:
2211 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2212 break;
4f2331e5 2213 case INDEX_op_ext_i32_i64:
48bb3750
RH
2214 case INDEX_op_ext32s_i64:
2215 tgen_ext32s(s, args[0], args[1]);
2216 break;
2217 case INDEX_op_ext8u_i64:
2218 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2219 break;
2220 case INDEX_op_ext16u_i64:
2221 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2222 break;
4f2331e5 2223 case INDEX_op_extu_i32_i64:
48bb3750
RH
2224 case INDEX_op_ext32u_i64:
2225 tgen_ext32u(s, args[0], args[1]);
2226 break;
2227
3790b918 2228 case INDEX_op_add2_i64:
ad19b358
RH
2229 if (const_args[4]) {
2230 if ((int64_t)args[4] >= 0) {
2231 tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2232 } else {
2233 tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2234 }
2235 } else {
2236 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2237 }
3790b918
RH
2238 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2239 break;
2240 case INDEX_op_sub2_i64:
ad19b358
RH
2241 if (const_args[4]) {
2242 if ((int64_t)args[4] >= 0) {
2243 tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2244 } else {
2245 tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2246 }
2247 } else {
2248 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2249 }
3790b918
RH
2250 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2251 break;
2252
48bb3750
RH
2253 case INDEX_op_brcond_i64:
2254 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
bec16311 2255 args[1], const_args[1], arg_label(args[3]));
48bb3750
RH
2256 break;
2257 case INDEX_op_setcond_i64:
2258 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2259 args[2], const_args[2]);
2260 break;
96a9f093
RH
2261 case INDEX_op_movcond_i64:
2262 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
7af525af 2263 args[2], const_args[2], args[3], const_args[3]);
96a9f093 2264 break;
48bb3750 2265
d5690ea4 2266 OP_32_64(deposit):
752b1be9
RH
2267 a0 = args[0], a1 = args[1], a2 = args[2];
2268 if (const_args[1]) {
2269 tgen_deposit(s, a0, a2, args[3], args[4], 1);
2270 } else {
2271 /* Since we can't support "0Z" as a constraint, we allow a1 in
2272 any register. Fix things up as if a matching constraint. */
2273 if (a0 != a1) {
2274 TCGType type = (opc == INDEX_op_deposit_i64);
2275 if (a0 == a2) {
2276 tcg_out_mov(s, type, TCG_TMP0, a2);
2277 a2 = TCG_TMP0;
2278 }
2279 tcg_out_mov(s, type, a0, a1);
2280 }
2281 tgen_deposit(s, a0, a2, args[3], args[4], 0);
2282 }
d5690ea4 2283 break;
752b1be9 2284
b0bf5fe8
RH
2285 OP_32_64(extract):
2286 tgen_extract(s, args[0], args[1], args[2], args[3]);
2287 break;
d5690ea4 2288
ce411066
RH
2289 case INDEX_op_clz_i64:
2290 tgen_clz(s, args[0], args[1], args[2], const_args[2]);
2291 break;
2292
c9314d61
PK
2293 case INDEX_op_mb:
2294 /* The host memory model is quite strong, we simply need to
2295 serialize the instruction stream. */
2296 if (args[0] & TCG_MO_ST_LD) {
2297 tcg_out_insn(s, RR, BCR,
b2c98d9d 2298 s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0);
c9314d61
PK
2299 }
2300 break;
2301
96d0ee7f
RH
2302 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2303 case INDEX_op_mov_i64:
2304 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2305 case INDEX_op_movi_i64:
2306 case INDEX_op_call: /* Always emitted via tcg_out_call. */
48bb3750 2307 default:
48bb3750
RH
2308 tcg_abort();
2309 }
2827822e
AG
2310}
2311
f69d277e
RH
/*
 * Return the operand-constraint description for opcode @op, or NULL if
 * the opcode is not implemented by this backend.  Each TCGTargetOpDef
 * gives one constraint string per operand, outputs first.
 */
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    /* Shared constraint sets, named after their constraint strings.
       "r" = any general register; "0"/"1" = must match output 0/1;
       "ri" = register or immediate.  The remaining letters ("L", "I",
       "J", "A", "a", "b") are target-specific classes parsed elsewhere
       in this file -- presumably immediate ranges and special register
       classes; confirm against target_parse_constraint.  */
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
    static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_0_ri = { .args_ct_str = { "r", "0", "ri" } };
    static const TCGTargetOpDef r_0_rI = { .args_ct_str = { "r", "0", "rI" } };
    static const TCGTargetOpDef r_0_rJ = { .args_ct_str = { "r", "0", "rJ" } };
    /* Six-operand sets for the double-word add2/sub2 ops: two outputs,
       two inputs aliased to them, then the low/high addend operands.  */
    static const TCGTargetOpDef a2_r
        = { .args_ct_str = { "r", "r", "0", "1", "r", "r" } };
    static const TCGTargetOpDef a2_ri
        = { .args_ct_str = { "r", "r", "0", "1", "ri", "r" } };
    static const TCGTargetOpDef a2_rA
        = { .args_ct_str = { "r", "r", "0", "1", "rA", "r" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    /* All loads and stores use a register base plus register value;
       the displacement arrives as a constant, not a constrained operand. */
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return &r_r;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
        return &r_r_ri;
    /* Without the distinct-operands facility these are two-address
       insns, so the destination must alias the first source ("0").  */
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);

    case INDEX_op_mul_i32:
        /* If we have the general-instruction-extensions, then we have
           MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we
           have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
        return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_ri : &r_0_rI);
    case INDEX_op_mul_i64:
        return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_rJ : &r_0_rI);

    /* 32-bit shifts with distinct output need the three-operand RSY
       forms (SLLK etc.); otherwise output must match the input.  */
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);

    /* 64-bit shifts and all rotates are always three-operand (RSY).  */
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return &r_r_ri;

    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return &r_ri;

    /* Simple one-input, one-output register operations.  */
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
        return &r_r;

    case INDEX_op_clz_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        return &r_r_ri;

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return &r_L;
    case INDEX_op_qemu_st_i64:
    case INDEX_op_qemu_st_i32:
        return &L_L;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        {
            /* "rZ" allows a zero constant for the value being inserted;
               tcg_out_op fixes up the non-matching destination case.  */
            static const TCGTargetOpDef dep
                = { .args_ct_str = { "r", "rZ", "r" } };
            return &dep;
        }
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        {
            static const TCGTargetOpDef movc
                = { .args_ct_str = { "r", "r", "ri", "r", "0" } };
            /* With LOAD ON CONDITION 2 the true-value may be a
               small immediate as well.  */
            static const TCGTargetOpDef movc_l
                = { .args_ct_str = { "r", "r", "ri", "rI", "0" } };
            return (s390_facilities & FACILITY_LOAD_ON_COND2 ? &movc_l : &movc);
        }
    case INDEX_op_div2_i32:
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i32:
    case INDEX_op_divu2_i64:
        {
            /* "a"/"b" pin operands to the even/odd register pair the
               divide insns require -- tcg_out_op hard-codes R2 for
               these, so presumably a=R2, b=R3; confirm against
               target_parse_constraint.  */
            static const TCGTargetOpDef div2
                = { .args_ct_str = { "b", "a", "0", "1", "r" } };
            return &div2;
        }
    case INDEX_op_mulu2_i64:
        {
            static const TCGTargetOpDef mul2
                = { .args_ct_str = { "b", "a", "0", "r" } };
            return &mul2;
        }

    /* add2/sub2 immediates require the extended-immediate facility.  */
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        return (s390_facilities & FACILITY_EXT_IMM ? &a2_ri : &a2_r);
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i64:
        return (s390_facilities & FACILITY_EXT_IMM ? &a2_rA : &a2_r);

    default:
        break;
    }
    return NULL;
}
2475
/*
 * Probe the host CPU's facility bits into the global s390_facilities.
 * If STFLE is unavailable, s390_facilities is left as-is (all facility
 * tests then fail and the backend uses the baseline instruction set).
 */
static void query_s390_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway.  */
    if (hwcap & HWCAP_S390_STFLE) {
        /* STFLE takes its operands in fixed registers: r0 holds the
           number of doublewords to store minus one, r1 the address.  */
        register int r0 __asm__("0");
        register void *r1 __asm__("1");

        /* stfle 0(%r1) -- emitted as raw opcode bytes, presumably so
           that older assemblers without the mnemonic still build this. */
        r1 = &s390_facilities;
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
    }
}
2492
2493static void tcg_target_init(TCGContext *s)
2827822e 2494{
b2c98d9d 2495 query_s390_facilities();
48bb3750 2496
f46934df
RH
2497 tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
2498 tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
48bb3750 2499
ccb1bb66 2500 tcg_target_call_clobber_regs = 0;
48bb3750
RH
2501 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2502 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2503 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2504 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2505 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2506 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
f24efee4
RH
2507 /* The r6 register is technically call-saved, but it's also a parameter
2508 register, so it can get killed by setup for the qemu_st helper. */
2509 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
48bb3750
RH
2510 /* The return register can be considered call-clobbered. */
2511 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2512
ccb1bb66 2513 s->reserved_regs = 0;
48bb3750
RH
2514 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2515 /* XXX many insns can't be used with R0, so we better avoid it for now */
2516 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2517 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
829e1376
RH
2518 if (USE_REG_TB) {
2519 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
2520 }
2827822e
AG
2521}
2522
f167dc37
RH
/* Total stack frame: ABI register-save/backchain area, outgoing call
   argument space, and the TCG temporary buffer.  */
#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
                           + TCG_STATIC_CALL_ARGS_SIZE           \
                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))

/*
 * Emit the prologue run once on entry to generated code, followed
 * immediately by the shared epilogue.  On entry: arg0 = CPUArchState,
 * arg1 = address of the TB to execute.  Instruction order here is
 * load-bearing; code_gen_epilogue and tb_ret_addr capture addresses
 * mid-stream.
 */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    /* Tell TCG where its spill/temp buffer lives within the frame.  */
    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifndef CONFIG_SOFTMMU
    /* Keep guest_base in a register when it is too large to fold into
       memory-operand displacements (threshold 0x80000 -- presumably the
       20-bit signed displacement limit; confirm against tcg_out_mem).  */
    if (guest_base >= 0x80000) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    if (USE_REG_TB) {
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB,
                    tcg_target_call_iarg_regs[1]);
    }

    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr.  Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}
f167dc37 2572
28eef8aa
RH
2573static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2574{
2575 memset(p, 0x07, count * sizeof(tcg_insn_unit));
2576}
2577
/* DWARF call-frame description handed to tcg_register_jit_int:
   a shared CIE/FDE header followed by the raw CFA-definition and
   register-save instruction bytes.  Array sizes must match the
   initializer in debug_frame below.  */
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];   /* DW_CFA_def_cfa: CFA = %r15 + FRAME_SIZE */
    uint8_t fde_reg_ofs[18];  /* DW_CFA_offset pairs for %r6..%r14 */
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

/* ELF machine id reported for the JIT interface.  */
#define ELF_HOST_MACHINE EM_S390
2588
d2e16f2c
RH
2589static const DebugFrame debug_frame = {
2590 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2591 .h.cie.id = -1,
2592 .h.cie.version = 1,
2593 .h.cie.code_align = 1,
2594 .h.cie.data_align = 8, /* sleb128 8 */
2595 .h.cie.return_column = TCG_REG_R14,
f167dc37
RH
2596
2597 /* Total FDE size does not include the "len" member. */
d2e16f2c 2598 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
f167dc37
RH
2599
2600 .fde_def_cfa = {
2601 12, TCG_REG_CALL_STACK, /* DW_CFA_def_cfa %r15, ... */
2602 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2603 (FRAME_SIZE >> 7)
2604 },
2605 .fde_reg_ofs = {
2606 0x86, 6, /* DW_CFA_offset, %r6, 48 */
2607 0x87, 7, /* DW_CFA_offset, %r7, 56 */
2608 0x88, 8, /* DW_CFA_offset, %r8, 64 */
2609 0x89, 9, /* DW_CFA_offset, %r92, 72 */
2610 0x8a, 10, /* DW_CFA_offset, %r10, 80 */
2611 0x8b, 11, /* DW_CFA_offset, %r11, 88 */
2612 0x8c, 12, /* DW_CFA_offset, %r12, 96 */
2613 0x8d, 13, /* DW_CFA_offset, %r13, 104 */
2614 0x8e, 14, /* DW_CFA_offset, %r14, 112 */
2615 }
2616};
2617
2618void tcg_register_jit(void *buf, size_t buf_size)
2619{
f167dc37
RH
2620 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2621}