]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/s390/tcg-target.c.inc
Merge remote-tracking branch 'remotes/jasowang/tags/net-pull-request' into staging
[mirror_qemu.git] / tcg / s390 / tcg-target.c.inc
CommitLineData
2827822e
AG
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
48bb3750
RH
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
2827822e
AG
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
a01fc30d
RH
27/* We only support generating code for 64-bit mode. */
28#if TCG_TARGET_REG_BITS != 64
29#error "unsupported code generation mode"
30#endif
31
139c1837 32#include "../tcg-pool.c.inc"
c9baa30f
RH
33#include "elf.h"
34
48bb3750
RH
35/* ??? The translation blocks produced by TCG are generally small enough to
36 be entirely reachable with a 16-bit displacement. Leaving the option for
37 a 32-bit displacement here Just In Case. */
38#define USE_LONG_BRANCHES 0
39
a8f0269e
RH
40#define TCG_CT_CONST_S16 0x100
41#define TCG_CT_CONST_S32 0x200
a534bb15
RH
42#define TCG_CT_CONST_S33 0x400
43#define TCG_CT_CONST_ZERO 0x800
48bb3750 44
c947deb1
RH
45#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16)
46/*
47 * For softmmu, we need to avoid conflicts with the first 3
48 * argument registers to perform the tlb lookup, and to call
49 * the helper function.
50 */
51#ifdef CONFIG_SOFTMMU
52#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_R2, 3)
53#else
54#define SOFTMMU_RESERVE_REGS 0
55#endif
56
57
48bb3750
RH
58/* Several places within the instruction set 0 means "no register"
59 rather than TCG_REG_R0. */
60#define TCG_REG_NONE 0
61
62/* A scratch register that may be be used throughout the backend. */
ce411066 63#define TCG_TMP0 TCG_REG_R1
48bb3750 64
829e1376
RH
65/* A scratch register that holds a pointer to the beginning of the TB.
66 We don't need this when we have pc-relative loads with the general
67 instructions extension facility. */
68#define TCG_REG_TB TCG_REG_R12
69#define USE_REG_TB (!(s390_facilities & FACILITY_GEN_INST_EXT))
70
4cbea598 71#ifndef CONFIG_SOFTMMU
48bb3750 72#define TCG_GUEST_BASE_REG TCG_REG_R13
48bb3750
RH
73#endif
74
48bb3750
RH
75/* All of the following instructions are prefixed with their instruction
76 format, and are defined as 8- or 16-bit quantities, even when the two
77 halves of the 16-bit quantity may appear 32 bits apart in the insn.
78 This makes it easy to copy the values from the tables in Appendix B. */
79typedef enum S390Opcode {
80 RIL_AFI = 0xc209,
81 RIL_AGFI = 0xc208,
3790b918 82 RIL_ALFI = 0xc20b,
48bb3750
RH
83 RIL_ALGFI = 0xc20a,
84 RIL_BRASL = 0xc005,
85 RIL_BRCL = 0xc004,
86 RIL_CFI = 0xc20d,
87 RIL_CGFI = 0xc20c,
88 RIL_CLFI = 0xc20f,
89 RIL_CLGFI = 0xc20e,
a534bb15
RH
90 RIL_CLRL = 0xc60f,
91 RIL_CLGRL = 0xc60a,
92 RIL_CRL = 0xc60d,
93 RIL_CGRL = 0xc608,
48bb3750
RH
94 RIL_IIHF = 0xc008,
95 RIL_IILF = 0xc009,
96 RIL_LARL = 0xc000,
97 RIL_LGFI = 0xc001,
98 RIL_LGRL = 0xc408,
99 RIL_LLIHF = 0xc00e,
100 RIL_LLILF = 0xc00f,
101 RIL_LRL = 0xc40d,
102 RIL_MSFI = 0xc201,
103 RIL_MSGFI = 0xc200,
104 RIL_NIHF = 0xc00a,
105 RIL_NILF = 0xc00b,
106 RIL_OIHF = 0xc00c,
107 RIL_OILF = 0xc00d,
3790b918 108 RIL_SLFI = 0xc205,
0db921e6 109 RIL_SLGFI = 0xc204,
48bb3750
RH
110 RIL_XIHF = 0xc006,
111 RIL_XILF = 0xc007,
112
113 RI_AGHI = 0xa70b,
114 RI_AHI = 0xa70a,
115 RI_BRC = 0xa704,
a534bb15
RH
116 RI_CHI = 0xa70e,
117 RI_CGHI = 0xa70f,
48bb3750
RH
118 RI_IIHH = 0xa500,
119 RI_IIHL = 0xa501,
120 RI_IILH = 0xa502,
121 RI_IILL = 0xa503,
122 RI_LGHI = 0xa709,
123 RI_LLIHH = 0xa50c,
124 RI_LLIHL = 0xa50d,
125 RI_LLILH = 0xa50e,
126 RI_LLILL = 0xa50f,
127 RI_MGHI = 0xa70d,
128 RI_MHI = 0xa70c,
129 RI_NIHH = 0xa504,
130 RI_NIHL = 0xa505,
131 RI_NILH = 0xa506,
132 RI_NILL = 0xa507,
133 RI_OIHH = 0xa508,
134 RI_OIHL = 0xa509,
135 RI_OILH = 0xa50a,
136 RI_OILL = 0xa50b,
137
138 RIE_CGIJ = 0xec7c,
139 RIE_CGRJ = 0xec64,
140 RIE_CIJ = 0xec7e,
141 RIE_CLGRJ = 0xec65,
142 RIE_CLIJ = 0xec7f,
143 RIE_CLGIJ = 0xec7d,
144 RIE_CLRJ = 0xec77,
145 RIE_CRJ = 0xec76,
7af525af 146 RIE_LOCGHI = 0xec46,
d5690ea4 147 RIE_RISBG = 0xec55,
48bb3750
RH
148
149 RRE_AGR = 0xb908,
3790b918
RH
150 RRE_ALGR = 0xb90a,
151 RRE_ALCR = 0xb998,
152 RRE_ALCGR = 0xb988,
48bb3750
RH
153 RRE_CGR = 0xb920,
154 RRE_CLGR = 0xb921,
155 RRE_DLGR = 0xb987,
156 RRE_DLR = 0xb997,
157 RRE_DSGFR = 0xb91d,
158 RRE_DSGR = 0xb90d,
ce411066 159 RRE_FLOGR = 0xb983,
48bb3750
RH
160 RRE_LGBR = 0xb906,
161 RRE_LCGR = 0xb903,
162 RRE_LGFR = 0xb914,
163 RRE_LGHR = 0xb907,
164 RRE_LGR = 0xb904,
165 RRE_LLGCR = 0xb984,
166 RRE_LLGFR = 0xb916,
167 RRE_LLGHR = 0xb985,
168 RRE_LRVR = 0xb91f,
169 RRE_LRVGR = 0xb90f,
170 RRE_LTGR = 0xb902,
36017dc6 171 RRE_MLGR = 0xb986,
48bb3750
RH
172 RRE_MSGR = 0xb90c,
173 RRE_MSR = 0xb252,
174 RRE_NGR = 0xb980,
175 RRE_OGR = 0xb981,
176 RRE_SGR = 0xb909,
3790b918
RH
177 RRE_SLGR = 0xb90b,
178 RRE_SLBR = 0xb999,
179 RRE_SLBGR = 0xb989,
48bb3750
RH
180 RRE_XGR = 0xb982,
181
96a9f093
RH
182 RRF_LOCR = 0xb9f2,
183 RRF_LOCGR = 0xb9e2,
c2097136
RH
184 RRF_NRK = 0xb9f4,
185 RRF_NGRK = 0xb9e4,
186 RRF_ORK = 0xb9f6,
187 RRF_OGRK = 0xb9e6,
188 RRF_SRK = 0xb9f9,
189 RRF_SGRK = 0xb9e9,
190 RRF_SLRK = 0xb9fb,
191 RRF_SLGRK = 0xb9eb,
192 RRF_XRK = 0xb9f7,
193 RRF_XGRK = 0xb9e7,
96a9f093 194
48bb3750 195 RR_AR = 0x1a,
3790b918 196 RR_ALR = 0x1e,
48bb3750
RH
197 RR_BASR = 0x0d,
198 RR_BCR = 0x07,
199 RR_CLR = 0x15,
200 RR_CR = 0x19,
201 RR_DR = 0x1d,
202 RR_LCR = 0x13,
203 RR_LR = 0x18,
204 RR_LTR = 0x12,
205 RR_NR = 0x14,
206 RR_OR = 0x16,
207 RR_SR = 0x1b,
3790b918 208 RR_SLR = 0x1f,
48bb3750
RH
209 RR_XR = 0x17,
210
211 RSY_RLL = 0xeb1d,
212 RSY_RLLG = 0xeb1c,
213 RSY_SLLG = 0xeb0d,
c2097136 214 RSY_SLLK = 0xebdf,
48bb3750 215 RSY_SRAG = 0xeb0a,
c2097136 216 RSY_SRAK = 0xebdc,
48bb3750 217 RSY_SRLG = 0xeb0c,
c2097136 218 RSY_SRLK = 0xebde,
48bb3750
RH
219
220 RS_SLL = 0x89,
221 RS_SRA = 0x8a,
222 RS_SRL = 0x88,
223
224 RXY_AG = 0xe308,
225 RXY_AY = 0xe35a,
226 RXY_CG = 0xe320,
a534bb15
RH
227 RXY_CLG = 0xe321,
228 RXY_CLY = 0xe355,
48bb3750 229 RXY_CY = 0xe359,
0db921e6 230 RXY_LAY = 0xe371,
48bb3750
RH
231 RXY_LB = 0xe376,
232 RXY_LG = 0xe304,
233 RXY_LGB = 0xe377,
234 RXY_LGF = 0xe314,
235 RXY_LGH = 0xe315,
236 RXY_LHY = 0xe378,
237 RXY_LLGC = 0xe390,
238 RXY_LLGF = 0xe316,
239 RXY_LLGH = 0xe391,
240 RXY_LMG = 0xeb04,
241 RXY_LRV = 0xe31e,
242 RXY_LRVG = 0xe30f,
243 RXY_LRVH = 0xe31f,
244 RXY_LY = 0xe358,
bdcd5d19 245 RXY_NG = 0xe380,
4046d9ca 246 RXY_OG = 0xe381,
48bb3750
RH
247 RXY_STCY = 0xe372,
248 RXY_STG = 0xe324,
249 RXY_STHY = 0xe370,
250 RXY_STMG = 0xeb24,
251 RXY_STRV = 0xe33e,
252 RXY_STRVG = 0xe32f,
253 RXY_STRVH = 0xe33f,
254 RXY_STY = 0xe350,
5bf67a92 255 RXY_XG = 0xe382,
48bb3750
RH
256
257 RX_A = 0x5a,
258 RX_C = 0x59,
259 RX_L = 0x58,
0db921e6 260 RX_LA = 0x41,
48bb3750
RH
261 RX_LH = 0x48,
262 RX_ST = 0x50,
263 RX_STC = 0x42,
264 RX_STH = 0x40,
ed3d51ec
SF
265
266 NOP = 0x0707,
48bb3750
RH
267} S390Opcode;
268
8d8fdbae 269#ifdef CONFIG_DEBUG_TCG
48bb3750
RH
270static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
271 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
272 "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
273};
274#endif
275
276/* Since R6 is a potential argument register, choose it last of the
277 call-saved registers. Likewise prefer the call-clobbered registers
278 in reverse order to maximize the chance of avoiding the arguments. */
2827822e 279static const int tcg_target_reg_alloc_order[] = {
f24efee4 280 /* Call saved registers. */
48bb3750
RH
281 TCG_REG_R13,
282 TCG_REG_R12,
283 TCG_REG_R11,
284 TCG_REG_R10,
285 TCG_REG_R9,
286 TCG_REG_R8,
287 TCG_REG_R7,
288 TCG_REG_R6,
f24efee4 289 /* Call clobbered registers. */
48bb3750
RH
290 TCG_REG_R14,
291 TCG_REG_R0,
292 TCG_REG_R1,
f24efee4 293 /* Argument registers, in reverse order of allocation. */
48bb3750
RH
294 TCG_REG_R5,
295 TCG_REG_R4,
296 TCG_REG_R3,
297 TCG_REG_R2,
2827822e
AG
298};
299
300static const int tcg_target_call_iarg_regs[] = {
48bb3750
RH
301 TCG_REG_R2,
302 TCG_REG_R3,
303 TCG_REG_R4,
304 TCG_REG_R5,
305 TCG_REG_R6,
2827822e
AG
306};
307
308static const int tcg_target_call_oarg_regs[] = {
48bb3750 309 TCG_REG_R2,
48bb3750
RH
310};
311
312#define S390_CC_EQ 8
313#define S390_CC_LT 4
314#define S390_CC_GT 2
315#define S390_CC_OV 1
316#define S390_CC_NE (S390_CC_LT | S390_CC_GT)
317#define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
318#define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
319#define S390_CC_NEVER 0
320#define S390_CC_ALWAYS 15
321
322/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
0aed257f 323static const uint8_t tcg_cond_to_s390_cond[] = {
48bb3750
RH
324 [TCG_COND_EQ] = S390_CC_EQ,
325 [TCG_COND_NE] = S390_CC_NE,
326 [TCG_COND_LT] = S390_CC_LT,
327 [TCG_COND_LE] = S390_CC_LE,
328 [TCG_COND_GT] = S390_CC_GT,
329 [TCG_COND_GE] = S390_CC_GE,
330 [TCG_COND_LTU] = S390_CC_LT,
331 [TCG_COND_LEU] = S390_CC_LE,
332 [TCG_COND_GTU] = S390_CC_GT,
333 [TCG_COND_GEU] = S390_CC_GE,
334};
335
336/* Condition codes that result from a LOAD AND TEST. Here, we have no
337 unsigned instruction variation, however since the test is vs zero we
338 can re-map the outcomes appropriately. */
0aed257f 339static const uint8_t tcg_cond_to_ltr_cond[] = {
48bb3750
RH
340 [TCG_COND_EQ] = S390_CC_EQ,
341 [TCG_COND_NE] = S390_CC_NE,
342 [TCG_COND_LT] = S390_CC_LT,
343 [TCG_COND_LE] = S390_CC_LE,
344 [TCG_COND_GT] = S390_CC_GT,
345 [TCG_COND_GE] = S390_CC_GE,
346 [TCG_COND_LTU] = S390_CC_NEVER,
347 [TCG_COND_LEU] = S390_CC_EQ,
348 [TCG_COND_GTU] = S390_CC_NE,
349 [TCG_COND_GEU] = S390_CC_ALWAYS,
350};
351
352#ifdef CONFIG_SOFTMMU
f24efee4
RH
353static void * const qemu_ld_helpers[16] = {
354 [MO_UB] = helper_ret_ldub_mmu,
355 [MO_SB] = helper_ret_ldsb_mmu,
356 [MO_LEUW] = helper_le_lduw_mmu,
357 [MO_LESW] = helper_le_ldsw_mmu,
358 [MO_LEUL] = helper_le_ldul_mmu,
359 [MO_LESL] = helper_le_ldsl_mmu,
360 [MO_LEQ] = helper_le_ldq_mmu,
361 [MO_BEUW] = helper_be_lduw_mmu,
362 [MO_BESW] = helper_be_ldsw_mmu,
363 [MO_BEUL] = helper_be_ldul_mmu,
364 [MO_BESL] = helper_be_ldsl_mmu,
365 [MO_BEQ] = helper_be_ldq_mmu,
e141ab52
BS
366};
367
f24efee4
RH
368static void * const qemu_st_helpers[16] = {
369 [MO_UB] = helper_ret_stb_mmu,
370 [MO_LEUW] = helper_le_stw_mmu,
371 [MO_LEUL] = helper_le_stl_mmu,
372 [MO_LEQ] = helper_le_stq_mmu,
373 [MO_BEUW] = helper_be_stw_mmu,
374 [MO_BEUL] = helper_be_stl_mmu,
375 [MO_BEQ] = helper_be_stq_mmu,
e141ab52 376};
e141ab52 377#endif
48bb3750 378
79dae4dd 379static const tcg_insn_unit *tb_ret_addr;
b2c98d9d 380uint64_t s390_facilities;
2827822e 381
79dae4dd 382static bool patch_reloc(tcg_insn_unit *src_rw, int type,
2ba7fae2 383 intptr_t value, intptr_t addend)
2827822e 384{
79dae4dd 385 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
e692a349 386 intptr_t pcrel2;
28eef8aa 387 uint32_t old;
e692a349
RH
388
389 value += addend;
79dae4dd 390 pcrel2 = (tcg_insn_unit *)value - src_rx;
48bb3750
RH
391
392 switch (type) {
393 case R_390_PC16DBL:
55dfd8fe 394 if (pcrel2 == (int16_t)pcrel2) {
79dae4dd 395 tcg_patch16(src_rw, pcrel2);
55dfd8fe
RH
396 return true;
397 }
48bb3750
RH
398 break;
399 case R_390_PC32DBL:
55dfd8fe 400 if (pcrel2 == (int32_t)pcrel2) {
79dae4dd 401 tcg_patch32(src_rw, pcrel2);
55dfd8fe
RH
402 return true;
403 }
48bb3750 404 break;
28eef8aa 405 case R_390_20:
55dfd8fe 406 if (value == sextract64(value, 0, 20)) {
79dae4dd 407 old = *(uint32_t *)src_rw & 0xf00000ff;
55dfd8fe 408 old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
79dae4dd 409 tcg_patch32(src_rw, old);
55dfd8fe
RH
410 return true;
411 }
28eef8aa 412 break;
48bb3750 413 default:
e692a349 414 g_assert_not_reached();
48bb3750 415 }
55dfd8fe 416 return false;
2827822e
AG
417}
418
2827822e 419/* Test if a constant matches the constraint. */
f6c6afc1 420static int tcg_target_const_match(tcg_target_long val, TCGType type,
48bb3750 421 const TCGArgConstraint *arg_ct)
2827822e 422{
48bb3750
RH
423 int ct = arg_ct->ct;
424
425 if (ct & TCG_CT_CONST) {
426 return 1;
427 }
428
671c835b 429 if (type == TCG_TYPE_I32) {
48bb3750
RH
430 val = (int32_t)val;
431 }
432
433 /* The following are mutually exclusive. */
a8f0269e
RH
434 if (ct & TCG_CT_CONST_S16) {
435 return val == (int16_t)val;
436 } else if (ct & TCG_CT_CONST_S32) {
437 return val == (int32_t)val;
ba18b07d
RH
438 } else if (ct & TCG_CT_CONST_S33) {
439 return val >= -0xffffffffll && val <= 0xffffffffll;
752b1be9
RH
440 } else if (ct & TCG_CT_CONST_ZERO) {
441 return val == 0;
48bb3750
RH
442 }
443
2827822e
AG
444 return 0;
445}
446
48bb3750
RH
447/* Emit instructions according to the given instruction format. */
448
449static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
450{
451 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
452}
453
454static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
455 TCGReg r1, TCGReg r2)
456{
457 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
458}
459
96a9f093
RH
460static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
461 TCGReg r1, TCGReg r2, int m3)
462{
463 tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
464}
465
48bb3750
RH
466static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
467{
468 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
469}
470
7af525af
RH
471static void tcg_out_insn_RIE(TCGContext *s, S390Opcode op, TCGReg r1,
472 int i2, int m3)
473{
474 tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
475 tcg_out32(s, (i2 << 16) | (op & 0xff));
476}
477
48bb3750
RH
478static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
479{
480 tcg_out16(s, op | (r1 << 4));
481 tcg_out32(s, i2);
482}
483
484static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
485 TCGReg b2, TCGReg r3, int disp)
486{
487 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
488 | (disp & 0xfff));
489}
490
491static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
492 TCGReg b2, TCGReg r3, int disp)
493{
494 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
495 tcg_out32(s, (op & 0xff) | (b2 << 28)
496 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
497}
498
499#define tcg_out_insn_RX tcg_out_insn_RS
500#define tcg_out_insn_RXY tcg_out_insn_RSY
501
502/* Emit an opcode with "type-checking" of the format. */
503#define tcg_out_insn(S, FMT, OP, ...) \
504 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
505
506
507/* emit 64-bit shifts */
508static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
509 TCGReg src, TCGReg sh_reg, int sh_imm)
510{
511 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
512}
513
514/* emit 32-bit shifts */
515static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
516 TCGReg sh_reg, int sh_imm)
517{
518 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
519}
520
78113e83 521static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
48bb3750
RH
522{
523 if (src != dst) {
524 if (type == TCG_TYPE_I32) {
525 tcg_out_insn(s, RR, LR, dst, src);
526 } else {
527 tcg_out_insn(s, RRE, LGR, dst, src);
528 }
529 }
78113e83 530 return true;
48bb3750
RH
531}
532
28eef8aa
RH
533static const S390Opcode lli_insns[4] = {
534 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
535};
48bb3750 536
28eef8aa
RH
537static bool maybe_out_small_movi(TCGContext *s, TCGType type,
538 TCGReg ret, tcg_target_long sval)
539{
48bb3750
RH
540 tcg_target_ulong uval = sval;
541 int i;
542
543 if (type == TCG_TYPE_I32) {
544 uval = (uint32_t)sval;
545 sval = (int32_t)sval;
546 }
547
548 /* Try all 32-bit insns that can load it in one go. */
549 if (sval >= -0x8000 && sval < 0x8000) {
550 tcg_out_insn(s, RI, LGHI, ret, sval);
28eef8aa 551 return true;
48bb3750
RH
552 }
553
554 for (i = 0; i < 4; i++) {
555 tcg_target_long mask = 0xffffull << i*16;
556 if ((uval & mask) == uval) {
557 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
28eef8aa 558 return true;
48bb3750
RH
559 }
560 }
561
28eef8aa
RH
562 return false;
563}
564
565/* load a register with an immediate value */
566static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
567 tcg_target_long sval, bool in_prologue)
568{
569 tcg_target_ulong uval;
570
571 /* Try all 32-bit insns that can load it in one go. */
572 if (maybe_out_small_movi(s, type, ret, sval)) {
573 return;
574 }
575
576 uval = sval;
577 if (type == TCG_TYPE_I32) {
578 uval = (uint32_t)sval;
579 sval = (int32_t)sval;
580 }
581
48bb3750 582 /* Try all 48-bit insns that can load it in one go. */
b2c98d9d 583 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
584 if (sval == (int32_t)sval) {
585 tcg_out_insn(s, RIL, LGFI, ret, sval);
586 return;
587 }
588 if (uval <= 0xffffffff) {
589 tcg_out_insn(s, RIL, LLILF, ret, uval);
590 return;
591 }
592 if ((uval & 0xffffffff) == 0) {
28eef8aa 593 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 32);
48bb3750
RH
594 return;
595 }
596 }
597
829e1376
RH
598 /* Try for PC-relative address load. For odd addresses,
599 attempt to use an offset from the start of the TB. */
48bb3750 600 if ((sval & 1) == 0) {
8c081b18 601 ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
48bb3750
RH
602 if (off == (int32_t)off) {
603 tcg_out_insn(s, RIL, LARL, ret, off);
604 return;
605 }
829e1376 606 } else if (USE_REG_TB && !in_prologue) {
dd90043f 607 ptrdiff_t off = tcg_tbrel_diff(s, (void *)sval);
829e1376
RH
608 if (off == sextract64(off, 0, 20)) {
609 /* This is certain to be an address within TB, and therefore
610 OFF will be negative; don't try RX_LA. */
611 tcg_out_insn(s, RXY, LAY, ret, TCG_REG_TB, TCG_REG_NONE, off);
612 return;
613 }
48bb3750
RH
614 }
615
28eef8aa
RH
616 /* A 32-bit unsigned value can be loaded in 2 insns. And given
617 that LLILL, LLIHL, LLILF above did not succeed, we know that
618 both insns are required. */
619 if (uval <= 0xffffffff) {
620 tcg_out_insn(s, RI, LLILL, ret, uval);
621 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
622 return;
623 }
48bb3750 624
ba2c7479
RH
625 /* Otherwise, stuff it in the constant pool. */
626 if (s390_facilities & FACILITY_GEN_INST_EXT) {
627 tcg_out_insn(s, RIL, LGRL, ret, 0);
628 new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
629 } else if (USE_REG_TB && !in_prologue) {
630 tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0);
631 new_pool_label(s, sval, R_390_20, s->code_ptr - 2,
dd90043f 632 tcg_tbrel_diff(s, NULL));
48bb3750 633 } else {
ba2c7479
RH
634 TCGReg base = ret ? ret : TCG_TMP0;
635 tcg_out_insn(s, RIL, LARL, base, 0);
636 new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
637 tcg_out_insn(s, RXY, LG, ret, base, TCG_REG_NONE, 0);
48bb3750
RH
638 }
639}
640
829e1376
RH
641static void tcg_out_movi(TCGContext *s, TCGType type,
642 TCGReg ret, tcg_target_long sval)
643{
644 tcg_out_movi_int(s, type, ret, sval, false);
645}
48bb3750
RH
646
647/* Emit a load/store type instruction. Inputs are:
648 DATA: The register to be loaded or stored.
649 BASE+OFS: The effective address.
650 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
651 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
652
653static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
654 TCGReg data, TCGReg base, TCGReg index,
655 tcg_target_long ofs)
656{
657 if (ofs < -0x80000 || ofs >= 0x80000) {
78c9f7c5
RH
658 /* Combine the low 20 bits of the offset with the actual load insn;
659 the high 44 bits must come from an immediate load. */
660 tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
661 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
662 ofs = low;
48bb3750
RH
663
664 /* If we were already given an index register, add it in. */
665 if (index != TCG_REG_NONE) {
666 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
667 }
668 index = TCG_TMP0;
669 }
670
671 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
672 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
673 } else {
674 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
675 }
2827822e
AG
676}
677
48bb3750 678
2827822e 679/* load data without address translation or endianness conversion */
48bb3750 680static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 681 TCGReg base, intptr_t ofs)
2827822e 682{
48bb3750
RH
683 if (type == TCG_TYPE_I32) {
684 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
685 } else {
686 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
687 }
2827822e
AG
688}
689
48bb3750 690static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 691 TCGReg base, intptr_t ofs)
2827822e 692{
48bb3750
RH
693 if (type == TCG_TYPE_I32) {
694 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
695 } else {
696 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
697 }
698}
699
59d7c14e
RH
700static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
701 TCGReg base, intptr_t ofs)
702{
703 return false;
704}
705
48bb3750 706/* load data from an absolute host address */
79dae4dd
RH
707static void tcg_out_ld_abs(TCGContext *s, TCGType type,
708 TCGReg dest, const void *abs)
48bb3750 709{
8c081b18 710 intptr_t addr = (intptr_t)abs;
48bb3750 711
b2c98d9d 712 if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
8c081b18 713 ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
48bb3750
RH
714 if (disp == (int32_t)disp) {
715 if (type == TCG_TYPE_I32) {
716 tcg_out_insn(s, RIL, LRL, dest, disp);
717 } else {
718 tcg_out_insn(s, RIL, LGRL, dest, disp);
719 }
720 return;
721 }
722 }
829e1376 723 if (USE_REG_TB) {
dd90043f 724 ptrdiff_t disp = tcg_tbrel_diff(s, abs);
829e1376
RH
725 if (disp == sextract64(disp, 0, 20)) {
726 tcg_out_ld(s, type, dest, TCG_REG_TB, disp);
727 return;
728 }
729 }
48bb3750
RH
730
731 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
732 tcg_out_ld(s, type, dest, dest, addr & 0xffff);
733}
734
f0bffc27
RH
735static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
736 int msb, int lsb, int ofs, int z)
737{
738 /* Format RIE-f */
739 tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
740 tcg_out16(s, (msb << 8) | (z << 7) | lsb);
741 tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
742}
743
48bb3750
RH
744static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
745{
b2c98d9d 746 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
747 tcg_out_insn(s, RRE, LGBR, dest, src);
748 return;
749 }
750
751 if (type == TCG_TYPE_I32) {
752 if (dest == src) {
753 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
754 } else {
755 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
756 }
757 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
758 } else {
759 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
760 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
761 }
762}
763
764static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
765{
b2c98d9d 766 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
767 tcg_out_insn(s, RRE, LLGCR, dest, src);
768 return;
769 }
770
771 if (dest == src) {
772 tcg_out_movi(s, type, TCG_TMP0, 0xff);
773 src = TCG_TMP0;
774 } else {
775 tcg_out_movi(s, type, dest, 0xff);
776 }
777 if (type == TCG_TYPE_I32) {
778 tcg_out_insn(s, RR, NR, dest, src);
779 } else {
780 tcg_out_insn(s, RRE, NGR, dest, src);
781 }
782}
783
784static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
785{
b2c98d9d 786 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
787 tcg_out_insn(s, RRE, LGHR, dest, src);
788 return;
789 }
790
791 if (type == TCG_TYPE_I32) {
792 if (dest == src) {
793 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
794 } else {
795 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
796 }
797 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
798 } else {
799 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
800 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
801 }
802}
803
804static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
805{
b2c98d9d 806 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
807 tcg_out_insn(s, RRE, LLGHR, dest, src);
808 return;
809 }
810
811 if (dest == src) {
812 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
813 src = TCG_TMP0;
814 } else {
815 tcg_out_movi(s, type, dest, 0xffff);
816 }
817 if (type == TCG_TYPE_I32) {
818 tcg_out_insn(s, RR, NR, dest, src);
819 } else {
820 tcg_out_insn(s, RRE, NGR, dest, src);
821 }
822}
823
824static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
825{
826 tcg_out_insn(s, RRE, LGFR, dest, src);
827}
828
829static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
830{
831 tcg_out_insn(s, RRE, LLGFR, dest, src);
832}
833
f0bffc27
RH
834/* Accept bit patterns like these:
835 0....01....1
836 1....10....0
837 1..10..01..1
838 0..01..10..0
839 Copied from gcc sources. */
840static inline bool risbg_mask(uint64_t c)
841{
842 uint64_t lsb;
843 /* We don't change the number of transitions by inverting,
844 so make sure we start with the LSB zero. */
845 if (c & 1) {
846 c = ~c;
847 }
848 /* Reject all zeros or all ones. */
849 if (c == 0) {
850 return false;
851 }
852 /* Find the first transition. */
853 lsb = c & -c;
854 /* Invert to look for a second transition. */
855 c = ~c;
856 /* Erase the first transition. */
857 c &= -lsb;
858 /* Find the second transition, if any. */
859 lsb = c & -c;
860 /* Match if all the bits are 1's, or if c is zero. */
861 return c == -lsb;
862}
863
547ec121
RH
864static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
865{
866 int msb, lsb;
867 if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
868 /* Achieve wraparound by swapping msb and lsb. */
869 msb = 64 - ctz64(~val);
870 lsb = clz64(~val) - 1;
871 } else {
872 msb = clz64(val);
873 lsb = 63 - ctz64(val);
874 }
875 tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
876}
877
07ff7983 878static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
48bb3750
RH
879{
880 static const S390Opcode ni_insns[4] = {
881 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
882 };
883 static const S390Opcode nif_insns[2] = {
884 RIL_NILF, RIL_NIHF
885 };
07ff7983 886 uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
48bb3750
RH
887 int i;
888
48bb3750 889 /* Look for the zero-extensions. */
07ff7983 890 if ((val & valid) == 0xffffffff) {
48bb3750
RH
891 tgen_ext32u(s, dest, dest);
892 return;
893 }
b2c98d9d 894 if (s390_facilities & FACILITY_EXT_IMM) {
07ff7983 895 if ((val & valid) == 0xff) {
48bb3750
RH
896 tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
897 return;
898 }
07ff7983 899 if ((val & valid) == 0xffff) {
48bb3750
RH
900 tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
901 return;
902 }
07ff7983 903 }
48bb3750 904
07ff7983
RH
905 /* Try all 32-bit insns that can perform it in one go. */
906 for (i = 0; i < 4; i++) {
907 tcg_target_ulong mask = ~(0xffffull << i*16);
908 if (((val | ~valid) & mask) == mask) {
909 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
910 return;
48bb3750 911 }
07ff7983 912 }
48bb3750 913
07ff7983 914 /* Try all 48-bit insns that can perform it in one go. */
b2c98d9d 915 if (s390_facilities & FACILITY_EXT_IMM) {
07ff7983
RH
916 for (i = 0; i < 2; i++) {
917 tcg_target_ulong mask = ~(0xffffffffull << i*32);
918 if (((val | ~valid) & mask) == mask) {
919 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
920 return;
48bb3750
RH
921 }
922 }
07ff7983 923 }
b2c98d9d 924 if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
547ec121 925 tgen_andi_risbg(s, dest, dest, val);
f0bffc27
RH
926 return;
927 }
48bb3750 928
bdcd5d19
RH
929 /* Use the constant pool if USE_REG_TB, but not for small constants. */
930 if (USE_REG_TB) {
931 if (!maybe_out_small_movi(s, type, TCG_TMP0, val)) {
932 tcg_out_insn(s, RXY, NG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
933 new_pool_label(s, val & valid, R_390_20, s->code_ptr - 2,
dd90043f 934 tcg_tbrel_diff(s, NULL));
bdcd5d19
RH
935 return;
936 }
937 } else {
938 tcg_out_movi(s, type, TCG_TMP0, val);
939 }
07ff7983
RH
940 if (type == TCG_TYPE_I32) {
941 tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
48bb3750 942 } else {
07ff7983 943 tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
48bb3750
RH
944 }
945}
946
4046d9ca 947static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
48bb3750
RH
948{
949 static const S390Opcode oi_insns[4] = {
950 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
951 };
4046d9ca 952 static const S390Opcode oif_insns[2] = {
48bb3750
RH
953 RIL_OILF, RIL_OIHF
954 };
955
956 int i;
957
958 /* Look for no-op. */
4046d9ca 959 if (unlikely(val == 0)) {
48bb3750
RH
960 return;
961 }
962
4046d9ca
RH
963 /* Try all 32-bit insns that can perform it in one go. */
964 for (i = 0; i < 4; i++) {
965 tcg_target_ulong mask = (0xffffull << i*16);
966 if ((val & mask) != 0 && (val & ~mask) == 0) {
967 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
968 return;
48bb3750 969 }
4046d9ca 970 }
48bb3750 971
4046d9ca
RH
972 /* Try all 48-bit insns that can perform it in one go. */
973 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
974 for (i = 0; i < 2; i++) {
975 tcg_target_ulong mask = (0xffffffffull << i*32);
976 if ((val & mask) != 0 && (val & ~mask) == 0) {
4046d9ca 977 tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32);
48bb3750
RH
978 return;
979 }
980 }
4046d9ca 981 }
48bb3750 982
4046d9ca
RH
983 /* Use the constant pool if USE_REG_TB, but not for small constants. */
984 if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
985 if (type == TCG_TYPE_I32) {
986 tcg_out_insn(s, RR, OR, dest, TCG_TMP0);
987 } else {
988 tcg_out_insn(s, RRE, OGR, dest, TCG_TMP0);
989 }
990 } else if (USE_REG_TB) {
991 tcg_out_insn(s, RXY, OG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
992 new_pool_label(s, val, R_390_20, s->code_ptr - 2,
dd90043f 993 tcg_tbrel_diff(s, NULL));
4046d9ca 994 } else {
48bb3750
RH
995 /* Perform the OR via sequential modifications to the high and
996 low parts. Do this via recursion to handle 16-bit vs 32-bit
997 masks in each half. */
4046d9ca
RH
998 tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM);
999 tgen_ori(s, type, dest, val & 0x00000000ffffffffull);
1000 tgen_ori(s, type, dest, val & 0xffffffff00000000ull);
48bb3750
RH
1001 }
1002}
1003
5bf67a92 1004static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
48bb3750 1005{
5bf67a92
RH
1006 /* Try all 48-bit insns that can perform it in one go. */
1007 if (s390_facilities & FACILITY_EXT_IMM) {
1008 if ((val & 0xffffffff00000000ull) == 0) {
1009 tcg_out_insn(s, RIL, XILF, dest, val);
1010 return;
1011 }
1012 if ((val & 0x00000000ffffffffull) == 0) {
1013 tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
1014 return;
1015 }
48bb3750 1016 }
5bf67a92
RH
1017
1018 /* Use the constant pool if USE_REG_TB, but not for small constants. */
1019 if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
1020 if (type == TCG_TYPE_I32) {
1021 tcg_out_insn(s, RR, XR, dest, TCG_TMP0);
1022 } else {
1023 tcg_out_insn(s, RRE, XGR, dest, TCG_TMP0);
1024 }
1025 } else if (USE_REG_TB) {
1026 tcg_out_insn(s, RXY, XG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
1027 new_pool_label(s, val, R_390_20, s->code_ptr - 2,
dd90043f 1028 tcg_tbrel_diff(s, NULL));
5bf67a92
RH
1029 } else {
1030 /* Perform the xor by parts. */
1031 tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM);
1032 if (val & 0xffffffff) {
1033 tcg_out_insn(s, RIL, XILF, dest, val);
1034 }
1035 if (val > 0xffffffff) {
1036 tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
1037 }
48bb3750
RH
1038 }
1039}
1040
1041static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
65839b56 1042 TCGArg c2, bool c2const, bool need_carry)
48bb3750 1043{
bcc66562 1044 bool is_unsigned = is_unsigned_cond(c);
a534bb15
RH
1045 S390Opcode op;
1046
48bb3750
RH
1047 if (c2const) {
1048 if (c2 == 0) {
65839b56
RH
1049 if (!(is_unsigned && need_carry)) {
1050 if (type == TCG_TYPE_I32) {
1051 tcg_out_insn(s, RR, LTR, r1, r1);
1052 } else {
1053 tcg_out_insn(s, RRE, LTGR, r1, r1);
1054 }
1055 return tcg_cond_to_ltr_cond[c];
1056 }
65839b56 1057 }
a534bb15
RH
1058
1059 if (!is_unsigned && c2 == (int16_t)c2) {
1060 op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI);
1061 tcg_out_insn_RI(s, op, r1, c2);
1062 goto exit;
1063 }
1064
1065 if (s390_facilities & FACILITY_EXT_IMM) {
65839b56 1066 if (type == TCG_TYPE_I32) {
a534bb15
RH
1067 op = (is_unsigned ? RIL_CLFI : RIL_CFI);
1068 tcg_out_insn_RIL(s, op, r1, c2);
1069 goto exit;
8e43c5a1 1070 } else if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2 : (TCGArg)(int32_t)c2)) {
a534bb15
RH
1071 op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
1072 tcg_out_insn_RIL(s, op, r1, c2);
1073 goto exit;
48bb3750
RH
1074 }
1075 }
a534bb15
RH
1076
1077 /* Use the constant pool, but not for small constants. */
1078 if (maybe_out_small_movi(s, type, TCG_TMP0, c2)) {
1079 c2 = TCG_TMP0;
1080 /* fall through to reg-reg */
1081 } else if (USE_REG_TB) {
48bb3750 1082 if (type == TCG_TYPE_I32) {
a534bb15
RH
1083 op = (is_unsigned ? RXY_CLY : RXY_CY);
1084 tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
1085 new_pool_label(s, (uint32_t)c2, R_390_20, s->code_ptr - 2,
dd90043f 1086 4 - tcg_tbrel_diff(s, NULL));
48bb3750 1087 } else {
a534bb15
RH
1088 op = (is_unsigned ? RXY_CLG : RXY_CG);
1089 tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
1090 new_pool_label(s, c2, R_390_20, s->code_ptr - 2,
dd90043f 1091 tcg_tbrel_diff(s, NULL));
48bb3750 1092 }
a534bb15 1093 goto exit;
48bb3750
RH
1094 } else {
1095 if (type == TCG_TYPE_I32) {
a534bb15
RH
1096 op = (is_unsigned ? RIL_CLRL : RIL_CRL);
1097 tcg_out_insn_RIL(s, op, r1, 0);
1098 new_pool_label(s, (uint32_t)c2, R_390_PC32DBL,
1099 s->code_ptr - 2, 2 + 4);
48bb3750 1100 } else {
a534bb15
RH
1101 op = (is_unsigned ? RIL_CLGRL : RIL_CGRL);
1102 tcg_out_insn_RIL(s, op, r1, 0);
1103 new_pool_label(s, c2, R_390_PC32DBL, s->code_ptr - 2, 2);
48bb3750 1104 }
a534bb15 1105 goto exit;
48bb3750
RH
1106 }
1107 }
a534bb15
RH
1108
1109 if (type == TCG_TYPE_I32) {
1110 op = (is_unsigned ? RR_CLR : RR_CR);
1111 tcg_out_insn_RR(s, op, r1, c2);
1112 } else {
1113 op = (is_unsigned ? RRE_CLGR : RRE_CGR);
1114 tcg_out_insn_RRE(s, op, r1, c2);
1115 }
1116
1117 exit:
48bb3750
RH
1118 return tcg_cond_to_s390_cond[c];
1119}
1120
7b7066b1 1121static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
96a9f093 1122 TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
48bb3750 1123{
7b7066b1 1124 int cc;
7af525af 1125 bool have_loc;
7b7066b1 1126
7af525af
RH
1127 /* With LOC2, we can always emit the minimum 3 insns. */
1128 if (s390_facilities & FACILITY_LOAD_ON_COND2) {
1129 /* Emit: d = 0, d = (cc ? 1 : d). */
1130 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
1131 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1132 tcg_out_insn(s, RIE, LOCGHI, dest, 1, cc);
1133 return;
1134 }
1135
1136 have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0;
1137
4609190b
RH
1138 /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */
1139 restart:
7b7066b1 1140 switch (cond) {
4609190b
RH
1141 case TCG_COND_NE:
1142 /* X != 0 is X > 0. */
1143 if (c2const && c2 == 0) {
1144 cond = TCG_COND_GTU;
1145 } else {
1146 break;
1147 }
1148 /* fallthru */
1149
7b7066b1
RH
1150 case TCG_COND_GTU:
1151 case TCG_COND_GT:
7b7066b1
RH
1152 /* The result of a compare has CC=2 for GT and CC=3 unused.
1153 ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */
65839b56 1154 tgen_cmp(s, type, cond, c1, c2, c2const, true);
7b7066b1
RH
1155 tcg_out_movi(s, type, dest, 0);
1156 tcg_out_insn(s, RRE, ALCGR, dest, dest);
1157 return;
1158
4609190b
RH
1159 case TCG_COND_EQ:
1160 /* X == 0 is X <= 0. */
1161 if (c2const && c2 == 0) {
1162 cond = TCG_COND_LEU;
7b7066b1 1163 } else {
4609190b 1164 break;
7b7066b1 1165 }
4609190b 1166 /* fallthru */
7b7066b1
RH
1167
1168 case TCG_COND_LEU:
4609190b
RH
1169 case TCG_COND_LE:
1170 /* As above, but we're looking for borrow, or !carry.
1171 The second insn computes d - d - borrow, or -1 for true
1172 and 0 for false. So we must mask to 1 bit afterward. */
1173 tgen_cmp(s, type, cond, c1, c2, c2const, true);
1174 tcg_out_insn(s, RRE, SLBGR, dest, dest);
1175 tgen_andi(s, type, dest, 1);
1176 return;
1177
1178 case TCG_COND_GEU:
7b7066b1
RH
1179 case TCG_COND_LTU:
1180 case TCG_COND_LT:
4609190b
RH
1181 case TCG_COND_GE:
1182 /* Swap operands so that we can use LEU/GTU/GT/LE. */
7b7066b1 1183 if (c2const) {
7af525af 1184 if (have_loc) {
4609190b 1185 break;
7af525af 1186 }
7b7066b1
RH
1187 tcg_out_movi(s, type, TCG_TMP0, c2);
1188 c2 = c1;
1189 c2const = 0;
1190 c1 = TCG_TMP0;
1191 } else {
1192 TCGReg t = c1;
1193 c1 = c2;
1194 c2 = t;
1195 }
7b7066b1 1196 cond = tcg_swap_cond(cond);
4609190b 1197 goto restart;
48bb3750 1198
7b7066b1 1199 default:
4609190b 1200 g_assert_not_reached();
7b7066b1
RH
1201 }
1202
65839b56 1203 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
4609190b
RH
1204 if (have_loc) {
1205 /* Emit: d = 0, t = 1, d = (cc ? t : d). */
1206 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1207 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
1208 tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
1209 } else {
1210 /* Emit: d = 1; if (cc) goto over; d = 0; over: */
1211 tcg_out_movi(s, type, dest, 1);
1212 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1213 tcg_out_movi(s, type, dest, 0);
1214 }
48bb3750
RH
1215}
1216
96a9f093 1217static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
7af525af
RH
1218 TCGReg c1, TCGArg c2, int c2const,
1219 TCGArg v3, int v3const)
96a9f093
RH
1220{
1221 int cc;
b2c98d9d 1222 if (s390_facilities & FACILITY_LOAD_ON_COND) {
65839b56 1223 cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
7af525af
RH
1224 if (v3const) {
1225 tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc);
1226 } else {
1227 tcg_out_insn(s, RRF, LOCGR, dest, v3, cc);
1228 }
96a9f093
RH
1229 } else {
1230 c = tcg_invert_cond(c);
65839b56 1231 cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
96a9f093
RH
1232
1233 /* Emit: if (cc) goto over; dest = r3; over: */
1234 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
7af525af 1235 tcg_out_insn(s, RRE, LGR, dest, v3);
96a9f093
RH
1236 }
1237}
1238
ce411066
RH
1239static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
1240 TCGArg a2, int a2const)
1241{
1242 /* Since this sets both R and R+1, we have no choice but to store the
1243 result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */
1244 QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
1245 tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);
1246
1247 if (a2const && a2 == 64) {
1248 tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
1249 } else {
1250 if (a2const) {
1251 tcg_out_movi(s, TCG_TYPE_I64, dest, a2);
1252 } else {
1253 tcg_out_mov(s, TCG_TYPE_I64, dest, a2);
1254 }
1255 if (s390_facilities & FACILITY_LOAD_ON_COND) {
1256 /* Emit: if (one bit found) dest = r0. */
1257 tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2);
1258 } else {
1259 /* Emit: if (no one bit found) goto over; dest = r0; over: */
1260 tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1);
1261 tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0);
1262 }
1263 }
1264}
1265
d5690ea4 1266static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
752b1be9 1267 int ofs, int len, int z)
d5690ea4
RH
1268{
1269 int lsb = (63 - ofs);
1270 int msb = lsb - (len - 1);
752b1be9 1271 tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
d5690ea4
RH
1272}
1273
b0bf5fe8
RH
1274static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
1275 int ofs, int len)
1276{
1277 tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
1278}
1279
ffd0e507 1280static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
48bb3750 1281{
79dae4dd 1282 ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
8c081b18 1283 if (off == (int16_t)off) {
48bb3750
RH
1284 tcg_out_insn(s, RI, BRC, cc, off);
1285 } else if (off == (int32_t)off) {
1286 tcg_out_insn(s, RIL, BRCL, cc, off);
1287 } else {
8c081b18 1288 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
48bb3750
RH
1289 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1290 }
1291}
1292
bec16311 1293static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
48bb3750 1294{
48bb3750 1295 if (l->has_value) {
8c081b18 1296 tgen_gotoi(s, cc, l->u.value_ptr);
48bb3750
RH
1297 } else if (USE_LONG_BRANCHES) {
1298 tcg_out16(s, RIL_BRCL | (cc << 4));
e692a349 1299 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, 2);
8c081b18 1300 s->code_ptr += 2;
48bb3750
RH
1301 } else {
1302 tcg_out16(s, RI_BRC | (cc << 4));
e692a349 1303 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
8c081b18 1304 s->code_ptr += 1;
48bb3750
RH
1305 }
1306}
1307
1308static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
bec16311 1309 TCGReg r1, TCGReg r2, TCGLabel *l)
48bb3750 1310{
79dae4dd 1311 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
48bb3750 1312 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
79dae4dd 1313 tcg_out16(s, 0);
48bb3750
RH
1314 tcg_out16(s, cc << 12 | (opc & 0xff));
1315}
1316
1317static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
bec16311 1318 TCGReg r1, int i2, TCGLabel *l)
48bb3750 1319{
79dae4dd 1320 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
48bb3750 1321 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
79dae4dd 1322 tcg_out16(s, 0);
48bb3750
RH
1323 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1324}
1325
1326static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
bec16311 1327 TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
48bb3750
RH
1328{
1329 int cc;
1330
b2c98d9d 1331 if (s390_facilities & FACILITY_GEN_INST_EXT) {
b879f308 1332 bool is_unsigned = is_unsigned_cond(c);
48bb3750
RH
1333 bool in_range;
1334 S390Opcode opc;
1335
1336 cc = tcg_cond_to_s390_cond[c];
1337
1338 if (!c2const) {
1339 opc = (type == TCG_TYPE_I32
1340 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1341 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
bec16311 1342 tgen_compare_branch(s, opc, cc, r1, c2, l);
48bb3750
RH
1343 return;
1344 }
1345
1346 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1347 If the immediate we've been given does not fit that range, we'll
1348 fall back to separate compare and branch instructions using the
1349 larger comparison range afforded by COMPARE IMMEDIATE. */
1350 if (type == TCG_TYPE_I32) {
1351 if (is_unsigned) {
1352 opc = RIE_CLIJ;
1353 in_range = (uint32_t)c2 == (uint8_t)c2;
1354 } else {
1355 opc = RIE_CIJ;
1356 in_range = (int32_t)c2 == (int8_t)c2;
1357 }
1358 } else {
1359 if (is_unsigned) {
1360 opc = RIE_CLGIJ;
1361 in_range = (uint64_t)c2 == (uint8_t)c2;
1362 } else {
1363 opc = RIE_CGIJ;
1364 in_range = (int64_t)c2 == (int8_t)c2;
1365 }
1366 }
1367 if (in_range) {
bec16311 1368 tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
48bb3750
RH
1369 return;
1370 }
1371 }
1372
65839b56 1373 cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
bec16311 1374 tgen_branch(s, cc, l);
48bb3750
RH
1375}
1376
2be7d76b 1377static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
48bb3750 1378{
79dae4dd 1379 ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
48bb3750
RH
1380 if (off == (int32_t)off) {
1381 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1382 } else {
8c081b18 1383 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
48bb3750
RH
1384 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1385 }
1386}
1387
14776ab5 1388static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
48bb3750
RH
1389 TCGReg base, TCGReg index, int disp)
1390{
3c8691f5 1391 switch (opc & (MO_SSIZE | MO_BSWAP)) {
a5a04f28 1392 case MO_UB:
48bb3750
RH
1393 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1394 break;
a5a04f28 1395 case MO_SB:
48bb3750
RH
1396 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1397 break;
b8dd88b8
RH
1398
1399 case MO_UW | MO_BSWAP:
1400 /* swapped unsigned halfword load with upper bits zeroed */
1401 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1402 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1403 break;
a5a04f28 1404 case MO_UW:
b8dd88b8
RH
1405 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1406 break;
1407
1408 case MO_SW | MO_BSWAP:
1409 /* swapped sign-extended halfword load */
1410 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1411 tgen_ext16s(s, TCG_TYPE_I64, data, data);
48bb3750 1412 break;
a5a04f28 1413 case MO_SW:
b8dd88b8
RH
1414 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1415 break;
1416
1417 case MO_UL | MO_BSWAP:
1418 /* swapped unsigned int load with upper bits zeroed */
1419 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1420 tgen_ext32u(s, data, data);
48bb3750 1421 break;
a5a04f28 1422 case MO_UL:
b8dd88b8
RH
1423 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1424 break;
1425
1426 case MO_SL | MO_BSWAP:
1427 /* swapped sign-extended int load */
1428 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1429 tgen_ext32s(s, data, data);
48bb3750 1430 break;
a5a04f28 1431 case MO_SL:
b8dd88b8
RH
1432 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1433 break;
1434
1435 case MO_Q | MO_BSWAP:
1436 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
48bb3750 1437 break;
a5a04f28 1438 case MO_Q:
b8dd88b8 1439 tcg_out_insn(s, RXY, LG, data, base, index, disp);
48bb3750 1440 break;
b8dd88b8 1441
48bb3750
RH
1442 default:
1443 tcg_abort();
1444 }
1445}
1446
14776ab5 1447static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
48bb3750
RH
1448 TCGReg base, TCGReg index, int disp)
1449{
3c8691f5 1450 switch (opc & (MO_SIZE | MO_BSWAP)) {
a5a04f28 1451 case MO_UB:
48bb3750
RH
1452 if (disp >= 0 && disp < 0x1000) {
1453 tcg_out_insn(s, RX, STC, data, base, index, disp);
1454 } else {
1455 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1456 }
1457 break;
b8dd88b8
RH
1458
1459 case MO_UW | MO_BSWAP:
1460 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1461 break;
a5a04f28 1462 case MO_UW:
b8dd88b8 1463 if (disp >= 0 && disp < 0x1000) {
48bb3750
RH
1464 tcg_out_insn(s, RX, STH, data, base, index, disp);
1465 } else {
1466 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1467 }
1468 break;
b8dd88b8
RH
1469
1470 case MO_UL | MO_BSWAP:
1471 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1472 break;
a5a04f28 1473 case MO_UL:
b8dd88b8 1474 if (disp >= 0 && disp < 0x1000) {
48bb3750
RH
1475 tcg_out_insn(s, RX, ST, data, base, index, disp);
1476 } else {
1477 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1478 }
1479 break;
b8dd88b8
RH
1480
1481 case MO_Q | MO_BSWAP:
1482 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1483 break;
a5a04f28 1484 case MO_Q:
b8dd88b8 1485 tcg_out_insn(s, RXY, STG, data, base, index, disp);
48bb3750 1486 break;
b8dd88b8 1487
48bb3750
RH
1488 default:
1489 tcg_abort();
1490 }
1491}
1492
1493#if defined(CONFIG_SOFTMMU)
139c1837 1494#include "../tcg-ldst.c.inc"
659ef5cb 1495
269bd5d8
RH
1496/* We're expecting to use a 20-bit negative offset on the tlb memory ops. */
1497QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1498QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
fb596415
RH
1499
1500/* Load and compare a TLB entry, leaving the flags set. Loads the TLB
1501 addend into R2. Returns a register with the santitized guest address. */
14776ab5 1502static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
fb596415 1503 int mem_index, bool is_ld)
48bb3750 1504{
85aa8081
RH
1505 unsigned s_bits = opc & MO_SIZE;
1506 unsigned a_bits = get_alignment_bits(opc);
1507 unsigned s_mask = (1 << s_bits) - 1;
1508 unsigned a_mask = (1 << a_bits) - 1;
269bd5d8
RH
1509 int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1510 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1511 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
a5e39810
RH
1512 int ofs, a_off;
1513 uint64_t tlb_mask;
1514
4f47e338
RH
1515 tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
1516 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1517 tcg_out_insn(s, RXY, NG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, mask_off);
1518 tcg_out_insn(s, RXY, AG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, table_off);
1519
a5e39810
RH
1520 /* For aligned accesses, we check the first byte and include the alignment
1521 bits within the address. For unaligned access, we check that we don't
1522 cross pages using the address of the last byte of the access. */
85aa8081
RH
1523 a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
1524 tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
4f47e338
RH
1525 if ((s390_facilities & FACILITY_GEN_INST_EXT) && a_off == 0) {
1526 tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
48bb3750 1527 } else {
a5e39810 1528 tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
547ec121 1529 tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
48bb3750
RH
1530 }
1531
fb596415 1532 if (is_ld) {
4f47e338 1533 ofs = offsetof(CPUTLBEntry, addr_read);
fb596415 1534 } else {
4f47e338 1535 ofs = offsetof(CPUTLBEntry, addr_write);
48bb3750 1536 }
48bb3750 1537 if (TARGET_LONG_BITS == 32) {
4f47e338 1538 tcg_out_insn(s, RX, C, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
48bb3750 1539 } else {
4f47e338 1540 tcg_out_insn(s, RXY, CG, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
48bb3750
RH
1541 }
1542
4f47e338
RH
1543 tcg_out_insn(s, RXY, LG, TCG_REG_R2, TCG_REG_R2, TCG_REG_NONE,
1544 offsetof(CPUTLBEntry, addend));
fb596415 1545
48bb3750 1546 if (TARGET_LONG_BITS == 32) {
fb596415
RH
1547 tgen_ext32u(s, TCG_REG_R3, addr_reg);
1548 return TCG_REG_R3;
48bb3750 1549 }
fb596415
RH
1550 return addr_reg;
1551}
48bb3750 1552
3972ef6f
RH
1553static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1554 TCGReg data, TCGReg addr,
fb596415
RH
1555 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
1556{
1557 TCGLabelQemuLdst *label = new_ldst_label(s);
1558
1559 label->is_ld = is_ld;
3972ef6f 1560 label->oi = oi;
fb596415
RH
1561 label->datalo_reg = data;
1562 label->addrlo_reg = addr;
e5e2e4c7 1563 label->raddr = tcg_splitwx_to_rx(raddr);
fb596415
RH
1564 label->label_ptr[0] = label_ptr;
1565}
48bb3750 1566
aeee05f5 1567static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
fb596415
RH
1568{
1569 TCGReg addr_reg = lb->addrlo_reg;
1570 TCGReg data_reg = lb->datalo_reg;
3972ef6f 1571 TCGMemOpIdx oi = lb->oi;
14776ab5 1572 MemOp opc = get_memop(oi);
48bb3750 1573
aeee05f5 1574 if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
79dae4dd 1575 (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
aeee05f5
RH
1576 return false;
1577 }
48bb3750 1578
fb596415
RH
1579 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1580 if (TARGET_LONG_BITS == 64) {
1581 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1582 }
3972ef6f 1583 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
fb596415 1584 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
2b7ec66f 1585 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
fb596415 1586 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
65a62a75 1587
fb596415 1588 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
aeee05f5 1589 return true;
48bb3750
RH
1590}
1591
aeee05f5 1592static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
48bb3750 1593{
fb596415
RH
1594 TCGReg addr_reg = lb->addrlo_reg;
1595 TCGReg data_reg = lb->datalo_reg;
3972ef6f 1596 TCGMemOpIdx oi = lb->oi;
14776ab5 1597 MemOp opc = get_memop(oi);
fb596415 1598
aeee05f5 1599 if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
79dae4dd 1600 (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
aeee05f5
RH
1601 return false;
1602 }
fb596415
RH
1603
1604 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1605 if (TARGET_LONG_BITS == 64) {
1606 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1607 }
1608 switch (opc & MO_SIZE) {
1609 case MO_UB:
1610 tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1611 break;
1612 case MO_UW:
1613 tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1614 break;
1615 case MO_UL:
1616 tgen_ext32u(s, TCG_REG_R4, data_reg);
1617 break;
1618 case MO_Q:
1619 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1620 break;
1621 default:
1622 tcg_abort();
1623 }
3972ef6f 1624 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
fb596415 1625 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
2b7ec66f 1626 tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
fb596415
RH
1627
1628 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
aeee05f5 1629 return true;
48bb3750
RH
1630}
1631#else
1632static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1633 TCGReg *index_reg, tcg_target_long *disp)
1634{
1635 if (TARGET_LONG_BITS == 32) {
1636 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1637 *addr_reg = TCG_TMP0;
1638 }
b76f21a7 1639 if (guest_base < 0x80000) {
48bb3750 1640 *index_reg = TCG_REG_NONE;
b76f21a7 1641 *disp = guest_base;
48bb3750
RH
1642 } else {
1643 *index_reg = TCG_GUEST_BASE_REG;
1644 *disp = 0;
1645 }
1646}
1647#endif /* CONFIG_SOFTMMU */
1648
f24efee4 1649static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
59227d5d 1650 TCGMemOpIdx oi)
48bb3750 1651{
14776ab5 1652 MemOp opc = get_memop(oi);
fb596415 1653#ifdef CONFIG_SOFTMMU
59227d5d 1654 unsigned mem_index = get_mmuidx(oi);
fb596415
RH
1655 tcg_insn_unit *label_ptr;
1656 TCGReg base_reg;
1657
1658 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
1659
cd3b29b7
AJ
1660 tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1661 label_ptr = s->code_ptr;
1662 s->code_ptr += 1;
fb596415
RH
1663
1664 tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
48bb3750 1665
3972ef6f 1666 add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
48bb3750 1667#else
f24efee4
RH
1668 TCGReg index_reg;
1669 tcg_target_long disp;
1670
48bb3750
RH
1671 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1672 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1673#endif
1674}
1675
f24efee4 1676static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
59227d5d 1677 TCGMemOpIdx oi)
48bb3750 1678{
14776ab5 1679 MemOp opc = get_memop(oi);
fb596415 1680#ifdef CONFIG_SOFTMMU
59227d5d 1681 unsigned mem_index = get_mmuidx(oi);
fb596415
RH
1682 tcg_insn_unit *label_ptr;
1683 TCGReg base_reg;
1684
1685 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
1686
cd3b29b7
AJ
1687 tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1688 label_ptr = s->code_ptr;
1689 s->code_ptr += 1;
fb596415
RH
1690
1691 tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
48bb3750 1692
3972ef6f 1693 add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
48bb3750 1694#else
f24efee4
RH
1695 TCGReg index_reg;
1696 tcg_target_long disp;
1697
48bb3750
RH
1698 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1699 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1700#endif
2827822e
AG
1701}
1702
48bb3750
RH
1703# define OP_32_64(x) \
1704 case glue(glue(INDEX_op_,x),_i32): \
1705 case glue(glue(INDEX_op_,x),_i64)
48bb3750 1706
a9751609 1707static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
5e8892db
MR
1708 const TCGArg args[TCG_MAX_OP_ARGS],
1709 const int const_args[TCG_MAX_OP_ARGS])
2827822e 1710{
c2097136 1711 S390Opcode op, op2;
0db921e6 1712 TCGArg a0, a1, a2;
48bb3750
RH
1713
1714 switch (opc) {
1715 case INDEX_op_exit_tb:
46644483
RH
1716 /* Reuse the zeroing that exists for goto_ptr. */
1717 a0 = args[0];
1718 if (a0 == 0) {
8b5c2b62 1719 tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
46644483
RH
1720 } else {
1721 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
1722 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1723 }
48bb3750
RH
1724 break;
1725
1726 case INDEX_op_goto_tb:
829e1376 1727 a0 = args[0];
f309101c 1728 if (s->tb_jmp_insn_offset) {
79dae4dd
RH
1729 /*
1730 * branch displacement must be aligned for atomic patching;
ed3d51ec
SF
1731 * see if we need to add extra nop before branch
1732 */
1733 if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1734 tcg_out16(s, NOP);
1735 }
829e1376 1736 tcg_debug_assert(!USE_REG_TB);
a10c64e0 1737 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
829e1376 1738 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
a10c64e0 1739 s->code_ptr += 2;
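/* Only the 2-byte BRCL opcode has been emitted above; the 4-byte
   relative displacement that follows is left to be patched in place
   when TBs are chained, which is why its offset is recorded in
   tb_jmp_insn_offset and why the alignment nop may be needed. */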
48bb3750 1740 } else {
829e1376
RH
1741 /* load address stored at s->tb_jmp_target_addr + a0 */
1742 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB,
79dae4dd 1743 tcg_splitwx_to_rx(s->tb_jmp_target_addr + a0));
48bb3750 1744 /* and go there */
829e1376
RH
1745 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
1746 }
9f754620 1747 set_jmp_reset_offset(s, a0);
829e1376
RH
1748
1749 /* For the unlinked path of goto_tb, we need to reset
1750 TCG_REG_TB to the beginning of this TB. */
1751 if (USE_REG_TB) {
1752 int ofs = -tcg_current_code_size(s);
79dae4dd
RH
 1753 /* All TBs are restricted to 64KiB by unwind info. */
1754 tcg_debug_assert(ofs == sextract64(ofs, 0, 20));
1755 tcg_out_insn(s, RXY, LAY, TCG_REG_TB,
1756 TCG_REG_TB, TCG_REG_NONE, ofs);
48bb3750 1757 }
48bb3750
RH
1758 break;
1759
46644483 1760 case INDEX_op_goto_ptr:
829e1376
RH
1761 a0 = args[0];
1762 if (USE_REG_TB) {
1763 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
1764 }
1765 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
46644483
RH
1766 break;
1767
48bb3750
RH
1768 OP_32_64(ld8u):
1769 /* ??? LLC (RXY format) is only present with the extended-immediate
1770 facility, whereas LLGC is always present. */
1771 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1772 break;
1773
1774 OP_32_64(ld8s):
 1775 /* ??? LB is no smaller than LGB, so no point in using it. */
1776 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1777 break;
1778
1779 OP_32_64(ld16u):
1780 /* ??? LLH (RXY format) is only present with the extended-immediate
1781 facility, whereas LLGH is always present. */
1782 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1783 break;
1784
1785 case INDEX_op_ld16s_i32:
1786 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1787 break;
1788
1789 case INDEX_op_ld_i32:
1790 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1791 break;
1792
1793 OP_32_64(st8):
1794 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1795 TCG_REG_NONE, args[2]);
1796 break;
1797
1798 OP_32_64(st16):
1799 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1800 TCG_REG_NONE, args[2]);
1801 break;
1802
1803 case INDEX_op_st_i32:
1804 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1805 break;
1806
1807 case INDEX_op_add_i32:
0db921e6 1808 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1809 if (const_args[2]) {
0db921e6
RH
1810 do_addi_32:
1811 if (a0 == a1) {
1812 if (a2 == (int16_t)a2) {
1813 tcg_out_insn(s, RI, AHI, a0, a2);
1814 break;
1815 }
b2c98d9d 1816 if (s390_facilities & FACILITY_EXT_IMM) {
0db921e6
RH
1817 tcg_out_insn(s, RIL, AFI, a0, a2);
1818 break;
1819 }
1820 }
1821 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1822 } else if (a0 == a1) {
1823 tcg_out_insn(s, RR, AR, a0, a2);
48bb3750 1824 } else {
0db921e6 1825 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
1826 }
1827 break;
1828 case INDEX_op_sub_i32:
0db921e6 1829 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1830 if (const_args[2]) {
0db921e6
RH
1831 a2 = -a2;
1832 goto do_addi_32;
c2097136
RH
1833 } else if (a0 == a1) {
1834 tcg_out_insn(s, RR, SR, a0, a2);
1835 } else {
1836 tcg_out_insn(s, RRF, SRK, a0, a1, a2);
48bb3750
RH
1837 }
1838 break;
1839
1840 case INDEX_op_and_i32:
c2097136 1841 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1842 if (const_args[2]) {
c2097136
RH
1843 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1844 tgen_andi(s, TCG_TYPE_I32, a0, a2);
1845 } else if (a0 == a1) {
1846 tcg_out_insn(s, RR, NR, a0, a2);
48bb3750 1847 } else {
c2097136 1848 tcg_out_insn(s, RRF, NRK, a0, a1, a2);
48bb3750
RH
1849 }
1850 break;
1851 case INDEX_op_or_i32:
c2097136 1852 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1853 if (const_args[2]) {
c2097136 1854 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
4046d9ca 1855 tgen_ori(s, TCG_TYPE_I32, a0, a2);
c2097136
RH
1856 } else if (a0 == a1) {
1857 tcg_out_insn(s, RR, OR, a0, a2);
48bb3750 1858 } else {
c2097136 1859 tcg_out_insn(s, RRF, ORK, a0, a1, a2);
48bb3750
RH
1860 }
1861 break;
1862 case INDEX_op_xor_i32:
c2097136 1863 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1864 if (const_args[2]) {
c2097136 1865 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
5bf67a92 1866 tgen_xori(s, TCG_TYPE_I32, a0, a2);
c2097136 1867 } else if (a0 == a1) {
48bb3750 1868 tcg_out_insn(s, RR, XR, args[0], args[2]);
c2097136
RH
1869 } else {
1870 tcg_out_insn(s, RRF, XRK, a0, a1, a2);
48bb3750
RH
1871 }
1872 break;
1873
1874 case INDEX_op_neg_i32:
1875 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1876 break;
1877
1878 case INDEX_op_mul_i32:
1879 if (const_args[2]) {
1880 if ((int32_t)args[2] == (int16_t)args[2]) {
1881 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1882 } else {
1883 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1884 }
1885 } else {
1886 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1887 }
1888 break;
1889
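/* For div2/divu2 the register constraints (see tcg_target_op_def)
   pin the 64-bit dividend to the R2:R3 even/odd pair; DR and DLR
   then leave the remainder in R2 and the quotient in R3, which is
   why only the divisor operand appears explicitly below. */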
1890 case INDEX_op_div2_i32:
1891 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1892 break;
1893 case INDEX_op_divu2_i32:
1894 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1895 break;
1896
1897 case INDEX_op_shl_i32:
1898 op = RS_SLL;
c2097136 1899 op2 = RSY_SLLK;
48bb3750 1900 do_shift32:
c2097136
RH
1901 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1902 if (a0 == a1) {
1903 if (const_args[2]) {
1904 tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
1905 } else {
1906 tcg_out_sh32(s, op, a0, a2, 0);
1907 }
48bb3750 1908 } else {
c2097136
RH
1909 /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
1910 if (const_args[2]) {
1911 tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
1912 } else {
1913 tcg_out_sh64(s, op2, a0, a1, a2, 0);
1914 }
48bb3750
RH
1915 }
1916 break;
1917 case INDEX_op_shr_i32:
1918 op = RS_SRL;
c2097136 1919 op2 = RSY_SRLK;
48bb3750
RH
1920 goto do_shift32;
1921 case INDEX_op_sar_i32:
1922 op = RS_SRA;
c2097136 1923 op2 = RSY_SRAK;
48bb3750
RH
1924 goto do_shift32;
1925
1926 case INDEX_op_rotl_i32:
1927 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1928 if (const_args[2]) {
1929 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1930 } else {
1931 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1932 }
1933 break;
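/* There is no rotate-right instruction; rotate right by N is done
   below as a rotate left by (32 - N) & 31, or by a negated count in
   the register case, which is equivalent modulo the rotation width. */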
1934 case INDEX_op_rotr_i32:
1935 if (const_args[2]) {
1936 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1937 TCG_REG_NONE, (32 - args[2]) & 31);
1938 } else {
1939 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1940 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1941 }
1942 break;
1943
1944 case INDEX_op_ext8s_i32:
1945 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1946 break;
1947 case INDEX_op_ext16s_i32:
1948 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1949 break;
1950 case INDEX_op_ext8u_i32:
1951 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1952 break;
1953 case INDEX_op_ext16u_i32:
1954 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1955 break;
1956
1957 OP_32_64(bswap16):
1958 /* The TCG bswap definition requires bits 0-47 already be zero.
1959 Thus we don't need the G-type insns to implement bswap16_i64. */
1960 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1961 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1962 break;
1963 OP_32_64(bswap32):
1964 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1965 break;
1966
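/* For add2/sub2, the low-part ADD/SUBTRACT LOGICAL (ALR/ALFI, SLR/SLFI)
   leaves the carry/borrow in the condition code, and the high-part
   ALCR/SLBR consumes it.  The outputs are constrained to match the
   first input pair, so only args[4]/args[5] are applied here. */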
3790b918 1967 case INDEX_op_add2_i32:
ad19b358
RH
1968 if (const_args[4]) {
1969 tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
1970 } else {
1971 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1972 }
3790b918
RH
1973 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1974 break;
1975 case INDEX_op_sub2_i32:
ad19b358
RH
1976 if (const_args[4]) {
1977 tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
1978 } else {
1979 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1980 }
3790b918
RH
1981 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1982 break;
1983
48bb3750 1984 case INDEX_op_br:
bec16311 1985 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
48bb3750
RH
1986 break;
1987
1988 case INDEX_op_brcond_i32:
1989 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
bec16311 1990 args[1], const_args[1], arg_label(args[3]));
48bb3750
RH
1991 break;
1992 case INDEX_op_setcond_i32:
1993 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1994 args[2], const_args[2]);
1995 break;
96a9f093
RH
1996 case INDEX_op_movcond_i32:
1997 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
7af525af 1998 args[2], const_args[2], args[3], const_args[3]);
96a9f093 1999 break;
48bb3750 2000
f24efee4 2001 case INDEX_op_qemu_ld_i32:
48bb3750 2002 /* ??? Technically we can use a non-extending instruction. */
f24efee4 2003 case INDEX_op_qemu_ld_i64:
59227d5d 2004 tcg_out_qemu_ld(s, args[0], args[1], args[2]);
48bb3750 2005 break;
f24efee4
RH
2006 case INDEX_op_qemu_st_i32:
2007 case INDEX_op_qemu_st_i64:
59227d5d 2008 tcg_out_qemu_st(s, args[0], args[1], args[2]);
48bb3750
RH
2009 break;
2010
48bb3750
RH
2011 case INDEX_op_ld16s_i64:
2012 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
2013 break;
2014 case INDEX_op_ld32u_i64:
2015 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
2016 break;
2017 case INDEX_op_ld32s_i64:
2018 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
2019 break;
2020 case INDEX_op_ld_i64:
2021 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2022 break;
2023
2024 case INDEX_op_st32_i64:
2025 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2026 break;
2027 case INDEX_op_st_i64:
2028 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2029 break;
2030
2031 case INDEX_op_add_i64:
0db921e6 2032 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2033 if (const_args[2]) {
0db921e6
RH
2034 do_addi_64:
2035 if (a0 == a1) {
2036 if (a2 == (int16_t)a2) {
2037 tcg_out_insn(s, RI, AGHI, a0, a2);
2038 break;
2039 }
b2c98d9d 2040 if (s390_facilities & FACILITY_EXT_IMM) {
0db921e6
RH
2041 if (a2 == (int32_t)a2) {
2042 tcg_out_insn(s, RIL, AGFI, a0, a2);
2043 break;
2044 } else if (a2 == (uint32_t)a2) {
2045 tcg_out_insn(s, RIL, ALGFI, a0, a2);
2046 break;
2047 } else if (-a2 == (uint32_t)-a2) {
2048 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
2049 break;
2050 }
2051 }
2052 }
2053 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2054 } else if (a0 == a1) {
2055 tcg_out_insn(s, RRE, AGR, a0, a2);
48bb3750 2056 } else {
0db921e6 2057 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
2058 }
2059 break;
2060 case INDEX_op_sub_i64:
0db921e6 2061 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2062 if (const_args[2]) {
0db921e6
RH
2063 a2 = -a2;
2064 goto do_addi_64;
c2097136
RH
2065 } else if (a0 == a1) {
2066 tcg_out_insn(s, RRE, SGR, a0, a2);
48bb3750 2067 } else {
c2097136 2068 tcg_out_insn(s, RRF, SGRK, a0, a1, a2);
48bb3750
RH
2069 }
2070 break;
2071
2072 case INDEX_op_and_i64:
c2097136 2073 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2074 if (const_args[2]) {
c2097136 2075 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
07ff7983 2076 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
c2097136 2077 } else if (a0 == a1) {
48bb3750 2078 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
c2097136
RH
2079 } else {
2080 tcg_out_insn(s, RRF, NGRK, a0, a1, a2);
48bb3750
RH
2081 }
2082 break;
2083 case INDEX_op_or_i64:
c2097136 2084 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2085 if (const_args[2]) {
c2097136 2086 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
4046d9ca 2087 tgen_ori(s, TCG_TYPE_I64, a0, a2);
c2097136
RH
2088 } else if (a0 == a1) {
2089 tcg_out_insn(s, RRE, OGR, a0, a2);
48bb3750 2090 } else {
c2097136 2091 tcg_out_insn(s, RRF, OGRK, a0, a1, a2);
48bb3750
RH
2092 }
2093 break;
2094 case INDEX_op_xor_i64:
c2097136 2095 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2096 if (const_args[2]) {
c2097136 2097 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
5bf67a92 2098 tgen_xori(s, TCG_TYPE_I64, a0, a2);
c2097136
RH
2099 } else if (a0 == a1) {
2100 tcg_out_insn(s, RRE, XGR, a0, a2);
48bb3750 2101 } else {
c2097136 2102 tcg_out_insn(s, RRF, XGRK, a0, a1, a2);
48bb3750
RH
2103 }
2104 break;
2105
2106 case INDEX_op_neg_i64:
2107 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2108 break;
2109 case INDEX_op_bswap64_i64:
2110 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2111 break;
2112
2113 case INDEX_op_mul_i64:
2114 if (const_args[2]) {
2115 if (args[2] == (int16_t)args[2]) {
2116 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
2117 } else {
2118 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
2119 }
2120 } else {
2121 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
2122 }
2123 break;
2124
2125 case INDEX_op_div2_i64:
 2126 /* ??? We get an unnecessary sign-extension of the dividend
 2127 into R3 with this definition, but since we do in fact always
 2128 produce both quotient and remainder, using INDEX_op_div_i64
 2129 instead would require jumping through even more hoops. */
2130 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
2131 break;
2132 case INDEX_op_divu2_i64:
2133 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
2134 break;
36017dc6
RH
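/* MLGR multiplies the value in the odd register of the R2:R3 pair by
   args[3], leaving the 128-bit product with the high half in R2 and
   the low half in R3; the output constraints below are presumably
   arranged to match exactly that pairing. */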
2135 case INDEX_op_mulu2_i64:
2136 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
2137 break;
48bb3750
RH
2138
2139 case INDEX_op_shl_i64:
2140 op = RSY_SLLG;
2141 do_shift64:
2142 if (const_args[2]) {
2143 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2144 } else {
2145 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2146 }
2147 break;
2148 case INDEX_op_shr_i64:
2149 op = RSY_SRLG;
2150 goto do_shift64;
2151 case INDEX_op_sar_i64:
2152 op = RSY_SRAG;
2153 goto do_shift64;
2154
2155 case INDEX_op_rotl_i64:
2156 if (const_args[2]) {
2157 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2158 TCG_REG_NONE, args[2]);
2159 } else {
2160 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2161 }
2162 break;
2163 case INDEX_op_rotr_i64:
2164 if (const_args[2]) {
2165 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2166 TCG_REG_NONE, (64 - args[2]) & 63);
2167 } else {
2168 /* We can use the smaller 32-bit negate because only the
2169 low 6 bits are examined for the rotate. */
2170 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2171 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2172 }
2173 break;
2174
2175 case INDEX_op_ext8s_i64:
2176 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2177 break;
2178 case INDEX_op_ext16s_i64:
2179 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2180 break;
4f2331e5 2181 case INDEX_op_ext_i32_i64:
48bb3750
RH
2182 case INDEX_op_ext32s_i64:
2183 tgen_ext32s(s, args[0], args[1]);
2184 break;
2185 case INDEX_op_ext8u_i64:
2186 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2187 break;
2188 case INDEX_op_ext16u_i64:
2189 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2190 break;
4f2331e5 2191 case INDEX_op_extu_i32_i64:
48bb3750
RH
2192 case INDEX_op_ext32u_i64:
2193 tgen_ext32u(s, args[0], args[1]);
2194 break;
2195
3790b918 2196 case INDEX_op_add2_i64:
ad19b358
RH
2197 if (const_args[4]) {
2198 if ((int64_t)args[4] >= 0) {
2199 tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2200 } else {
2201 tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2202 }
2203 } else {
2204 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2205 }
3790b918
RH
2206 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2207 break;
2208 case INDEX_op_sub2_i64:
ad19b358
RH
2209 if (const_args[4]) {
2210 if ((int64_t)args[4] >= 0) {
2211 tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2212 } else {
2213 tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2214 }
2215 } else {
2216 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2217 }
3790b918
RH
2218 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2219 break;
2220
48bb3750
RH
2221 case INDEX_op_brcond_i64:
2222 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
bec16311 2223 args[1], const_args[1], arg_label(args[3]));
48bb3750
RH
2224 break;
2225 case INDEX_op_setcond_i64:
2226 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2227 args[2], const_args[2]);
2228 break;
96a9f093
RH
2229 case INDEX_op_movcond_i64:
2230 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
7af525af 2231 args[2], const_args[2], args[3], const_args[3]);
96a9f093 2232 break;
48bb3750 2233
d5690ea4 2234 OP_32_64(deposit):
752b1be9
RH
2235 a0 = args[0], a1 = args[1], a2 = args[2];
2236 if (const_args[1]) {
2237 tgen_deposit(s, a0, a2, args[3], args[4], 1);
2238 } else {
2239 /* Since we can't support "0Z" as a constraint, we allow a1 in
 2240 any register. Fix things up as if it were a matching constraint. */
2241 if (a0 != a1) {
2242 TCGType type = (opc == INDEX_op_deposit_i64);
2243 if (a0 == a2) {
2244 tcg_out_mov(s, type, TCG_TMP0, a2);
2245 a2 = TCG_TMP0;
2246 }
2247 tcg_out_mov(s, type, a0, a1);
2248 }
2249 tgen_deposit(s, a0, a2, args[3], args[4], 0);
2250 }
d5690ea4 2251 break;
752b1be9 2252
b0bf5fe8
RH
2253 OP_32_64(extract):
2254 tgen_extract(s, args[0], args[1], args[2], args[3]);
2255 break;
d5690ea4 2256
ce411066
RH
2257 case INDEX_op_clz_i64:
2258 tgen_clz(s, args[0], args[1], args[2], const_args[2]);
2259 break;
2260
c9314d61
PK
2261 case INDEX_op_mb:
 2262 /* The host memory model is quite strong; we simply need to
2263 serialize the instruction stream. */
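/* A serializing "bcr 15,0" is always available; "bcr 14,0" is the
   cheaper form added by the fast-BCR-serialization facility, hence
   the facility test below. */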
2264 if (args[0] & TCG_MO_ST_LD) {
2265 tcg_out_insn(s, RR, BCR,
b2c98d9d 2266 s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0);
c9314d61
PK
2267 }
2268 break;
2269
96d0ee7f
RH
2270 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2271 case INDEX_op_mov_i64:
96d0ee7f 2272 case INDEX_op_call: /* Always emitted via tcg_out_call. */
48bb3750 2273 default:
48bb3750
RH
2274 tcg_abort();
2275 }
2827822e
AG
2276}
2277
d1c36a90 2278static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
f69d277e 2279{
9b5500b6
RH
2280 switch (op) {
2281 case INDEX_op_goto_ptr:
d1c36a90 2282 return C_O0_I1(r);
9b5500b6
RH
2283
2284 case INDEX_op_ld8u_i32:
2285 case INDEX_op_ld8u_i64:
2286 case INDEX_op_ld8s_i32:
2287 case INDEX_op_ld8s_i64:
2288 case INDEX_op_ld16u_i32:
2289 case INDEX_op_ld16u_i64:
2290 case INDEX_op_ld16s_i32:
2291 case INDEX_op_ld16s_i64:
2292 case INDEX_op_ld_i32:
2293 case INDEX_op_ld32u_i64:
2294 case INDEX_op_ld32s_i64:
2295 case INDEX_op_ld_i64:
d1c36a90
RH
2296 return C_O1_I1(r, r);
2297
9b5500b6
RH
2298 case INDEX_op_st8_i32:
2299 case INDEX_op_st8_i64:
2300 case INDEX_op_st16_i32:
2301 case INDEX_op_st16_i64:
2302 case INDEX_op_st_i32:
2303 case INDEX_op_st32_i64:
2304 case INDEX_op_st_i64:
d1c36a90 2305 return C_O0_I2(r, r);
9b5500b6
RH
2306
2307 case INDEX_op_add_i32:
2308 case INDEX_op_add_i64:
d1c36a90
RH
2309 case INDEX_op_shl_i64:
2310 case INDEX_op_shr_i64:
2311 case INDEX_op_sar_i64:
2312 case INDEX_op_rotl_i32:
2313 case INDEX_op_rotl_i64:
2314 case INDEX_op_rotr_i32:
2315 case INDEX_op_rotr_i64:
2316 case INDEX_op_clz_i64:
2317 case INDEX_op_setcond_i32:
2318 case INDEX_op_setcond_i64:
2319 return C_O1_I2(r, r, ri);
2320
9b5500b6
RH
2321 case INDEX_op_sub_i32:
2322 case INDEX_op_sub_i64:
bdcd5d19
RH
2323 case INDEX_op_and_i32:
2324 case INDEX_op_and_i64:
4046d9ca
RH
2325 case INDEX_op_or_i32:
2326 case INDEX_op_or_i64:
5bf67a92
RH
2327 case INDEX_op_xor_i32:
2328 case INDEX_op_xor_i64:
d1c36a90
RH
2329 return (s390_facilities & FACILITY_DISTINCT_OPS
2330 ? C_O1_I2(r, r, ri)
2331 : C_O1_I2(r, 0, ri));
a8f0269e 2332
9b5500b6 2333 case INDEX_op_mul_i32:
a8f0269e
RH
 2334 /* If we have the general-instruction-extensions facility, then we
 2335 have MULTIPLY SINGLE IMMEDIATE with a signed 32-bit immediate;
 2336 otherwise we have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit immediate. */
d1c36a90
RH
2337 return (s390_facilities & FACILITY_GEN_INST_EXT
2338 ? C_O1_I2(r, 0, ri)
2339 : C_O1_I2(r, 0, rI));
2340
9b5500b6 2341 case INDEX_op_mul_i64:
d1c36a90
RH
2342 return (s390_facilities & FACILITY_GEN_INST_EXT
2343 ? C_O1_I2(r, 0, rJ)
2344 : C_O1_I2(r, 0, rI));
a8f0269e 2345
9b5500b6
RH
2346 case INDEX_op_shl_i32:
2347 case INDEX_op_shr_i32:
2348 case INDEX_op_sar_i32:
d1c36a90
RH
2349 return (s390_facilities & FACILITY_DISTINCT_OPS
2350 ? C_O1_I2(r, r, ri)
2351 : C_O1_I2(r, 0, ri));
9b5500b6
RH
2352
2353 case INDEX_op_brcond_i32:
2354 case INDEX_op_brcond_i64:
d1c36a90 2355 return C_O0_I2(r, ri);
9b5500b6
RH
2356
2357 case INDEX_op_bswap16_i32:
2358 case INDEX_op_bswap16_i64:
2359 case INDEX_op_bswap32_i32:
2360 case INDEX_op_bswap32_i64:
2361 case INDEX_op_bswap64_i64:
2362 case INDEX_op_neg_i32:
2363 case INDEX_op_neg_i64:
2364 case INDEX_op_ext8s_i32:
2365 case INDEX_op_ext8s_i64:
2366 case INDEX_op_ext8u_i32:
2367 case INDEX_op_ext8u_i64:
2368 case INDEX_op_ext16s_i32:
2369 case INDEX_op_ext16s_i64:
2370 case INDEX_op_ext16u_i32:
2371 case INDEX_op_ext16u_i64:
2372 case INDEX_op_ext32s_i64:
2373 case INDEX_op_ext32u_i64:
2374 case INDEX_op_ext_i32_i64:
2375 case INDEX_op_extu_i32_i64:
2376 case INDEX_op_extract_i32:
2377 case INDEX_op_extract_i64:
d1c36a90 2378 return C_O1_I1(r, r);
9b5500b6
RH
2379
2380 case INDEX_op_qemu_ld_i32:
2381 case INDEX_op_qemu_ld_i64:
d1c36a90 2382 return C_O1_I1(r, L);
9b5500b6
RH
2383 case INDEX_op_qemu_st_i64:
2384 case INDEX_op_qemu_st_i32:
d1c36a90 2385 return C_O0_I2(L, L);
f69d277e 2386
9b5500b6
RH
2387 case INDEX_op_deposit_i32:
2388 case INDEX_op_deposit_i64:
d1c36a90
RH
2389 return C_O1_I2(r, rZ, r);
2390
9b5500b6
RH
2391 case INDEX_op_movcond_i32:
2392 case INDEX_op_movcond_i64:
d1c36a90
RH
2393 return (s390_facilities & FACILITY_LOAD_ON_COND2
2394 ? C_O1_I4(r, r, ri, rI, 0)
2395 : C_O1_I4(r, r, ri, r, 0));
2396
9b5500b6
RH
2397 case INDEX_op_div2_i32:
2398 case INDEX_op_div2_i64:
2399 case INDEX_op_divu2_i32:
2400 case INDEX_op_divu2_i64:
d1c36a90
RH
2401 return C_O2_I3(b, a, 0, 1, r);
2402
9b5500b6 2403 case INDEX_op_mulu2_i64:
d1c36a90 2404 return C_O2_I2(b, a, 0, r);
ba18b07d 2405
9b5500b6 2406 case INDEX_op_add2_i32:
9b5500b6 2407 case INDEX_op_sub2_i32:
d1c36a90
RH
2408 return (s390_facilities & FACILITY_EXT_IMM
2409 ? C_O2_I4(r, r, 0, 1, ri, r)
2410 : C_O2_I4(r, r, 0, 1, r, r));
2411
ba18b07d 2412 case INDEX_op_add2_i64:
9b5500b6 2413 case INDEX_op_sub2_i64:
d1c36a90
RH
2414 return (s390_facilities & FACILITY_EXT_IMM
2415 ? C_O2_I4(r, r, 0, 1, rA, r)
2416 : C_O2_I4(r, r, 0, 1, r, r));
9b5500b6
RH
2417
2418 default:
d1c36a90 2419 g_assert_not_reached();
f69d277e 2420 }
f69d277e
RH
2421}
2422
b2c98d9d 2423static void query_s390_facilities(void)
48bb3750 2424{
c9baa30f 2425 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
48bb3750 2426
c9baa30f
RH
2427 /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
2428 is present on all 64-bit systems, but let's check for it anyway. */
2429 if (hwcap & HWCAP_S390_STFLE) {
2430 register int r0 __asm__("0");
2431 register void *r1 __asm__("1");
48bb3750 2432
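/* STFLE takes, in r0, the number of doublewords available at the
   store location minus one (zero here, i.e. a single doubleword of
   facility bits) and stores through the address in r1.  The opcode
   is hand-encoded as raw words, presumably so that this still
   assembles with toolchains that predate the mnemonic. */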
c9baa30f 2433 /* stfle 0(%r1) */
b2c98d9d 2434 r1 = &s390_facilities;
c9baa30f
RH
2435 asm volatile(".word 0xb2b0,0x1000"
2436 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
48bb3750
RH
2437 }
2438}
2439
2440static void tcg_target_init(TCGContext *s)
2827822e 2441{
b2c98d9d 2442 query_s390_facilities();
48bb3750 2443
f46934df
RH
2444 tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
2445 tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
48bb3750 2446
ccb1bb66 2447 tcg_target_call_clobber_regs = 0;
48bb3750
RH
2448 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2449 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2450 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2451 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2452 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2453 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
f24efee4
RH
2454 /* The r6 register is technically call-saved, but it's also a parameter
2455 register, so it can get killed by setup for the qemu_st helper. */
2456 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
48bb3750
RH
2457 /* The return register can be considered call-clobbered. */
2458 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2459
ccb1bb66 2460 s->reserved_regs = 0;
48bb3750
RH
2461 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
 2462 /* XXX many insns can't be used with R0, so we'd better avoid it for now */
2463 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2464 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
829e1376
RH
2465 if (USE_REG_TB) {
2466 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
2467 }
2827822e
AG
2468}
2469
f167dc37
RH
2470#define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \
2471 + TCG_STATIC_CALL_ARGS_SIZE \
2472 + CPU_TEMP_BUF_NLONGS * sizeof(long)))
2473
48bb3750 2474static void tcg_target_qemu_prologue(TCGContext *s)
2827822e 2475{
48bb3750
RH
2476 /* stmg %r6,%r15,48(%r15) (save registers) */
2477 tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
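/* Per the s390x ELF ABI, the caller's frame provides a 160-byte
   register save area; r6..r15 land at offsets 48..120, which is what
   the DW_CFA_offset entries in debug_frame below also describe. */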
2478
a4924e8b 2479 /* aghi %r15,-frame_size */
f167dc37 2480 tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);
a4924e8b
RH
2481
2482 tcg_set_frame(s, TCG_REG_CALL_STACK,
2483 TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
2484 CPU_TEMP_BUF_NLONGS * sizeof(long));
48bb3750 2485
090d0bfd 2486#ifndef CONFIG_SOFTMMU
b76f21a7 2487 if (guest_base >= 0x80000) {
829e1376 2488 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
48bb3750
RH
2489 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2490 }
090d0bfd 2491#endif
48bb3750 2492
cea5f9a2 2493 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
829e1376
RH
2494 if (USE_REG_TB) {
2495 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB,
2496 tcg_target_call_iarg_regs[1]);
2497 }
2498
cea5f9a2
BS
2499 /* br %r3 (go to TB) */
2500 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
48bb3750 2501
46644483
RH
2502 /*
2503 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
2504 * and fall through to the rest of the epilogue.
2505 */
c8bc1168 2506 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
46644483
RH
2507 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);
2508
2509 /* TB epilogue */
79dae4dd 2510 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
48bb3750 2511
a4924e8b
RH
2512 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2513 tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
f167dc37 2514 FRAME_SIZE + 48);
48bb3750
RH
2515
2516 /* br %r14 (return) */
2517 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
2827822e 2518}
f167dc37 2519
28eef8aa
RH
2520static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2521{
2522 memset(p, 0x07, count * sizeof(tcg_insn_unit));
2523}
2524
f167dc37 2525typedef struct {
d2e16f2c 2526 DebugFrameHeader h;
f167dc37
RH
2527 uint8_t fde_def_cfa[4];
2528 uint8_t fde_reg_ofs[18];
2529} DebugFrame;
2530
2531/* We're expecting a 2 byte uleb128 encoded value. */
2532QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
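/* Two uleb128 bytes cover values below 1 << 14: the low seven bits
   with the continuation bit set, then the remaining bits.  A frame
   size of 0x520, for example, would encode as 0xa0 0x0a, which is the
   split performed in fde_def_cfa below. */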
2533
2534#define ELF_HOST_MACHINE EM_S390
2535
d2e16f2c
RH
2536static const DebugFrame debug_frame = {
2537 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2538 .h.cie.id = -1,
2539 .h.cie.version = 1,
2540 .h.cie.code_align = 1,
2541 .h.cie.data_align = 8, /* sleb128 8 */
2542 .h.cie.return_column = TCG_REG_R14,
f167dc37
RH
2543
2544 /* Total FDE size does not include the "len" member. */
d2e16f2c 2545 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
f167dc37
RH
2546
2547 .fde_def_cfa = {
2548 12, TCG_REG_CALL_STACK, /* DW_CFA_def_cfa %r15, ... */
2549 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2550 (FRAME_SIZE >> 7)
2551 },
2552 .fde_reg_ofs = {
2553 0x86, 6, /* DW_CFA_offset, %r6, 48 */
2554 0x87, 7, /* DW_CFA_offset, %r7, 56 */
2555 0x88, 8, /* DW_CFA_offset, %r8, 64 */
 2556 0x89, 9, /* DW_CFA_offset, %r9, 72 */
2557 0x8a, 10, /* DW_CFA_offset, %r10, 80 */
2558 0x8b, 11, /* DW_CFA_offset, %r11, 88 */
2559 0x8c, 12, /* DW_CFA_offset, %r12, 96 */
2560 0x8d, 13, /* DW_CFA_offset, %r13, 104 */
2561 0x8e, 14, /* DW_CFA_offset, %r14, 112 */
2562 }
2563};
2564
755bf9e5 2565void tcg_register_jit(const void *buf, size_t buf_size)
f167dc37 2566{
f167dc37
RH
2567 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2568}