tcg/s390: Use load-on-condition-2 facility
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
fb596415 27#include "tcg-be-ldst.h"
3cf246f0 28
29/* We only support generating code for 64-bit mode. */
30#if TCG_TARGET_REG_BITS != 64
31#error "unsupported code generation mode"
32#endif
33
34#include "elf.h"
35
36/* ??? The translation blocks produced by TCG are generally small enough to
37 be entirely reachable with a 16-bit displacement. Leaving the option for
38 a 32-bit displacement here Just In Case. */
39#define USE_LONG_BRANCHES 0
40
41#define TCG_CT_CONST_S16 0x100
42#define TCG_CT_CONST_S32 0x200
43#define TCG_CT_CONST_NN16 0x400
44#define TCG_CT_CONST_NN32 0x800
a8f0269e 45#define TCG_CT_CONST_U31 0x1000
ba18b07d 46#define TCG_CT_CONST_S33 0x2000
a8f0269e 47#define TCG_CT_CONST_ZERO 0x4000
48
49/* In several places within the instruction set, 0 means "no register"
50 rather than TCG_REG_R0. */
51#define TCG_REG_NONE 0
52
53 /* A scratch register that may be used throughout the backend. */
ce411066 54#define TCG_TMP0 TCG_REG_R1
48bb3750 55
4cbea598 56#ifndef CONFIG_SOFTMMU
48bb3750 57#define TCG_GUEST_BASE_REG TCG_REG_R13
58#endif
59
60/* All of the following instructions are prefixed with their instruction
61 format, and are defined as 8- or 16-bit quantities, even when the two
62 halves of the 16-bit quantity may appear 32 bits apart in the insn.
63 This makes it easy to copy the values from the tables in Appendix B. */
64typedef enum S390Opcode {
65 RIL_AFI = 0xc209,
66 RIL_AGFI = 0xc208,
3790b918 67 RIL_ALFI = 0xc20b,
68 RIL_ALGFI = 0xc20a,
69 RIL_BRASL = 0xc005,
70 RIL_BRCL = 0xc004,
71 RIL_CFI = 0xc20d,
72 RIL_CGFI = 0xc20c,
73 RIL_CLFI = 0xc20f,
74 RIL_CLGFI = 0xc20e,
75 RIL_IIHF = 0xc008,
76 RIL_IILF = 0xc009,
77 RIL_LARL = 0xc000,
78 RIL_LGFI = 0xc001,
79 RIL_LGRL = 0xc408,
80 RIL_LLIHF = 0xc00e,
81 RIL_LLILF = 0xc00f,
82 RIL_LRL = 0xc40d,
83 RIL_MSFI = 0xc201,
84 RIL_MSGFI = 0xc200,
85 RIL_NIHF = 0xc00a,
86 RIL_NILF = 0xc00b,
87 RIL_OIHF = 0xc00c,
88 RIL_OILF = 0xc00d,
3790b918 89 RIL_SLFI = 0xc205,
0db921e6 90 RIL_SLGFI = 0xc204,
91 RIL_XIHF = 0xc006,
92 RIL_XILF = 0xc007,
93
94 RI_AGHI = 0xa70b,
95 RI_AHI = 0xa70a,
96 RI_BRC = 0xa704,
97 RI_IIHH = 0xa500,
98 RI_IIHL = 0xa501,
99 RI_IILH = 0xa502,
100 RI_IILL = 0xa503,
101 RI_LGHI = 0xa709,
102 RI_LLIHH = 0xa50c,
103 RI_LLIHL = 0xa50d,
104 RI_LLILH = 0xa50e,
105 RI_LLILL = 0xa50f,
106 RI_MGHI = 0xa70d,
107 RI_MHI = 0xa70c,
108 RI_NIHH = 0xa504,
109 RI_NIHL = 0xa505,
110 RI_NILH = 0xa506,
111 RI_NILL = 0xa507,
112 RI_OIHH = 0xa508,
113 RI_OIHL = 0xa509,
114 RI_OILH = 0xa50a,
115 RI_OILL = 0xa50b,
116
117 RIE_CGIJ = 0xec7c,
118 RIE_CGRJ = 0xec64,
119 RIE_CIJ = 0xec7e,
120 RIE_CLGRJ = 0xec65,
121 RIE_CLIJ = 0xec7f,
122 RIE_CLGIJ = 0xec7d,
123 RIE_CLRJ = 0xec77,
124 RIE_CRJ = 0xec76,
7af525af 125 RIE_LOCGHI = 0xec46,
d5690ea4 126 RIE_RISBG = 0xec55,
127
128 RRE_AGR = 0xb908,
129 RRE_ALGR = 0xb90a,
130 RRE_ALCR = 0xb998,
131 RRE_ALCGR = 0xb988,
132 RRE_CGR = 0xb920,
133 RRE_CLGR = 0xb921,
134 RRE_DLGR = 0xb987,
135 RRE_DLR = 0xb997,
136 RRE_DSGFR = 0xb91d,
137 RRE_DSGR = 0xb90d,
ce411066 138 RRE_FLOGR = 0xb983,
139 RRE_LGBR = 0xb906,
140 RRE_LCGR = 0xb903,
141 RRE_LGFR = 0xb914,
142 RRE_LGHR = 0xb907,
143 RRE_LGR = 0xb904,
144 RRE_LLGCR = 0xb984,
145 RRE_LLGFR = 0xb916,
146 RRE_LLGHR = 0xb985,
147 RRE_LRVR = 0xb91f,
148 RRE_LRVGR = 0xb90f,
149 RRE_LTGR = 0xb902,
36017dc6 150 RRE_MLGR = 0xb986,
151 RRE_MSGR = 0xb90c,
152 RRE_MSR = 0xb252,
153 RRE_NGR = 0xb980,
154 RRE_OGR = 0xb981,
155 RRE_SGR = 0xb909,
156 RRE_SLGR = 0xb90b,
157 RRE_SLBR = 0xb999,
158 RRE_SLBGR = 0xb989,
159 RRE_XGR = 0xb982,
160
161 RRF_LOCR = 0xb9f2,
162 RRF_LOCGR = 0xb9e2,
163 RRF_NRK = 0xb9f4,
164 RRF_NGRK = 0xb9e4,
165 RRF_ORK = 0xb9f6,
166 RRF_OGRK = 0xb9e6,
167 RRF_SRK = 0xb9f9,
168 RRF_SGRK = 0xb9e9,
169 RRF_SLRK = 0xb9fb,
170 RRF_SLGRK = 0xb9eb,
171 RRF_XRK = 0xb9f7,
172 RRF_XGRK = 0xb9e7,
96a9f093 173
48bb3750 174 RR_AR = 0x1a,
3790b918 175 RR_ALR = 0x1e,
176 RR_BASR = 0x0d,
177 RR_BCR = 0x07,
178 RR_CLR = 0x15,
179 RR_CR = 0x19,
180 RR_DR = 0x1d,
181 RR_LCR = 0x13,
182 RR_LR = 0x18,
183 RR_LTR = 0x12,
184 RR_NR = 0x14,
185 RR_OR = 0x16,
186 RR_SR = 0x1b,
3790b918 187 RR_SLR = 0x1f,
188 RR_XR = 0x17,
189
190 RSY_RLL = 0xeb1d,
191 RSY_RLLG = 0xeb1c,
192 RSY_SLLG = 0xeb0d,
c2097136 193 RSY_SLLK = 0xebdf,
48bb3750 194 RSY_SRAG = 0xeb0a,
c2097136 195 RSY_SRAK = 0xebdc,
48bb3750 196 RSY_SRLG = 0xeb0c,
c2097136 197 RSY_SRLK = 0xebde,
198
199 RS_SLL = 0x89,
200 RS_SRA = 0x8a,
201 RS_SRL = 0x88,
202
203 RXY_AG = 0xe308,
204 RXY_AY = 0xe35a,
205 RXY_CG = 0xe320,
206 RXY_CY = 0xe359,
0db921e6 207 RXY_LAY = 0xe371,
208 RXY_LB = 0xe376,
209 RXY_LG = 0xe304,
210 RXY_LGB = 0xe377,
211 RXY_LGF = 0xe314,
212 RXY_LGH = 0xe315,
213 RXY_LHY = 0xe378,
214 RXY_LLGC = 0xe390,
215 RXY_LLGF = 0xe316,
216 RXY_LLGH = 0xe391,
217 RXY_LMG = 0xeb04,
218 RXY_LRV = 0xe31e,
219 RXY_LRVG = 0xe30f,
220 RXY_LRVH = 0xe31f,
221 RXY_LY = 0xe358,
222 RXY_STCY = 0xe372,
223 RXY_STG = 0xe324,
224 RXY_STHY = 0xe370,
225 RXY_STMG = 0xeb24,
226 RXY_STRV = 0xe33e,
227 RXY_STRVG = 0xe32f,
228 RXY_STRVH = 0xe33f,
229 RXY_STY = 0xe350,
230
231 RX_A = 0x5a,
232 RX_C = 0x59,
233 RX_L = 0x58,
0db921e6 234 RX_LA = 0x41,
235 RX_LH = 0x48,
236 RX_ST = 0x50,
237 RX_STC = 0x42,
238 RX_STH = 0x40,
239
240 NOP = 0x0707,
241} S390Opcode;
242
8d8fdbae 243#ifdef CONFIG_DEBUG_TCG
244static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
245 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
246 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
247};
248#endif
249
250/* Since R6 is a potential argument register, choose it last of the
251 call-saved registers. Likewise prefer the call-clobbered registers
252 in reverse order to maximize the chance of avoiding the arguments. */
2827822e 253static const int tcg_target_reg_alloc_order[] = {
f24efee4 254 /* Call saved registers. */
255 TCG_REG_R13,
256 TCG_REG_R12,
257 TCG_REG_R11,
258 TCG_REG_R10,
259 TCG_REG_R9,
260 TCG_REG_R8,
261 TCG_REG_R7,
262 TCG_REG_R6,
f24efee4 263 /* Call clobbered registers. */
264 TCG_REG_R14,
265 TCG_REG_R0,
266 TCG_REG_R1,
f24efee4 267 /* Argument registers, in reverse order of allocation. */
268 TCG_REG_R5,
269 TCG_REG_R4,
270 TCG_REG_R3,
271 TCG_REG_R2,
272};
273
274static const int tcg_target_call_iarg_regs[] = {
275 TCG_REG_R2,
276 TCG_REG_R3,
277 TCG_REG_R4,
278 TCG_REG_R5,
279 TCG_REG_R6,
280};
281
282static const int tcg_target_call_oarg_regs[] = {
48bb3750 283 TCG_REG_R2,
284};
285
286#define S390_CC_EQ 8
287#define S390_CC_LT 4
288#define S390_CC_GT 2
289#define S390_CC_OV 1
290#define S390_CC_NE (S390_CC_LT | S390_CC_GT)
291#define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
292#define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
293#define S390_CC_NEVER 0
294#define S390_CC_ALWAYS 15
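/* These are branch condition masks: bit 8 selects CC 0, bit 4 CC 1, bit 2 CC 2
   and bit 1 CC 3.  After a COMPARE, CC 0/1/2 mean equal/low/high, which is how
   the names above map onto the mask bits. */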
295
296/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
0aed257f 297static const uint8_t tcg_cond_to_s390_cond[] = {
298 [TCG_COND_EQ] = S390_CC_EQ,
299 [TCG_COND_NE] = S390_CC_NE,
300 [TCG_COND_LT] = S390_CC_LT,
301 [TCG_COND_LE] = S390_CC_LE,
302 [TCG_COND_GT] = S390_CC_GT,
303 [TCG_COND_GE] = S390_CC_GE,
304 [TCG_COND_LTU] = S390_CC_LT,
305 [TCG_COND_LEU] = S390_CC_LE,
306 [TCG_COND_GTU] = S390_CC_GT,
307 [TCG_COND_GEU] = S390_CC_GE,
308};
309
310/* Condition codes that result from a LOAD AND TEST. Here, we have no
311 unsigned instruction variation; however, since the test is vs zero we
312 can re-map the outcomes appropriately. */
0aed257f 313static const uint8_t tcg_cond_to_ltr_cond[] = {
314 [TCG_COND_EQ] = S390_CC_EQ,
315 [TCG_COND_NE] = S390_CC_NE,
316 [TCG_COND_LT] = S390_CC_LT,
317 [TCG_COND_LE] = S390_CC_LE,
318 [TCG_COND_GT] = S390_CC_GT,
319 [TCG_COND_GE] = S390_CC_GE,
320 [TCG_COND_LTU] = S390_CC_NEVER,
321 [TCG_COND_LEU] = S390_CC_EQ,
322 [TCG_COND_GTU] = S390_CC_NE,
323 [TCG_COND_GEU] = S390_CC_ALWAYS,
324};
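/* Note the unsigned entries above: compared against zero, LTU can never be
   true and GEU is always true, while LEU and GTU degenerate to equality and
   inequality. */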
325
326#ifdef CONFIG_SOFTMMU
327static void * const qemu_ld_helpers[16] = {
328 [MO_UB] = helper_ret_ldub_mmu,
329 [MO_SB] = helper_ret_ldsb_mmu,
330 [MO_LEUW] = helper_le_lduw_mmu,
331 [MO_LESW] = helper_le_ldsw_mmu,
332 [MO_LEUL] = helper_le_ldul_mmu,
333 [MO_LESL] = helper_le_ldsl_mmu,
334 [MO_LEQ] = helper_le_ldq_mmu,
335 [MO_BEUW] = helper_be_lduw_mmu,
336 [MO_BESW] = helper_be_ldsw_mmu,
337 [MO_BEUL] = helper_be_ldul_mmu,
338 [MO_BESL] = helper_be_ldsl_mmu,
339 [MO_BEQ] = helper_be_ldq_mmu,
340};
341
342static void * const qemu_st_helpers[16] = {
343 [MO_UB] = helper_ret_stb_mmu,
344 [MO_LEUW] = helper_le_stw_mmu,
345 [MO_LEUL] = helper_le_stl_mmu,
346 [MO_LEQ] = helper_le_stq_mmu,
347 [MO_BEUW] = helper_be_stw_mmu,
348 [MO_BEUL] = helper_be_stl_mmu,
349 [MO_BEQ] = helper_be_stq_mmu,
e141ab52 350};
e141ab52 351#endif
48bb3750 352
8c081b18 353static tcg_insn_unit *tb_ret_addr;
b2c98d9d 354uint64_t s390_facilities;
2827822e 355
8c081b18 356static void patch_reloc(tcg_insn_unit *code_ptr, int type,
2ba7fae2 357 intptr_t value, intptr_t addend)
2827822e 358{
8c081b18 359 intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1);
eabb7b91 360 tcg_debug_assert(addend == -2);
361
362 switch (type) {
363 case R_390_PC16DBL:
eabb7b91 364 tcg_debug_assert(pcrel2 == (int16_t)pcrel2);
8c081b18 365 tcg_patch16(code_ptr, pcrel2);
366 break;
367 case R_390_PC32DBL:
eabb7b91 368 tcg_debug_assert(pcrel2 == (int32_t)pcrel2);
8c081b18 369 tcg_patch32(code_ptr, pcrel2);
370 break;
371 default:
372 tcg_abort();
373 break;
374 }
375}
376
2827822e 377/* parse target specific constraints */
378static const char *target_parse_constraint(TCGArgConstraint *ct,
379 const char *ct_str, TCGType type)
2827822e 380{
069ea736 381 switch (*ct_str++) {
382 case 'r': /* all registers */
383 ct->ct |= TCG_CT_REG;
384 tcg_regset_set32(ct->u.regs, 0, 0xffff);
385 break;
386 case 'L': /* qemu_ld/st constraint */
387 ct->ct |= TCG_CT_REG;
388 tcg_regset_set32(ct->u.regs, 0, 0xffff);
389 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
390 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
65a62a75 391 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
392 break;
393 case 'a': /* force R2 for division */
394 ct->ct |= TCG_CT_REG;
395 tcg_regset_clear(ct->u.regs);
396 tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
397 break;
398 case 'b': /* force R3 for division */
399 ct->ct |= TCG_CT_REG;
400 tcg_regset_clear(ct->u.regs);
401 tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
402 break;
ad19b358 403 case 'A':
ba18b07d 404 ct->ct |= TCG_CT_CONST_S33;
ad19b358 405 break;
406 case 'I':
407 ct->ct |= TCG_CT_CONST_S16;
408 break;
409 case 'J':
410 ct->ct |= TCG_CT_CONST_S32;
48bb3750 411 break;
412 case 'N':
413 ct->ct |= TCG_CT_CONST_NN16;
48bb3750 414 break;
415 case 'M':
416 ct->ct |= TCG_CT_CONST_NN32;
417 break;
418 case 'C':
419 /* ??? We have no insight here into whether the comparison is
420 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
421 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
422 a 32-bit unsigned immediate. If we were to use the (semi)
423 obvious "val == (int32_t)val" we would be enabling unsigned
424 comparisons vs very large numbers. The only solution is to
425 take the intersection of the ranges. */
426 /* ??? Another possible solution is to simply lie and allow all
427 constants here and force the out-of-range values into a temp
428 register in tgen_cmp when we have knowledge of the actual
429 comparison code in use. */
430 ct->ct |= TCG_CT_CONST_U31;
48bb3750 431 break;
432 case 'Z':
433 ct->ct |= TCG_CT_CONST_ZERO;
434 break;
48bb3750 435 default:
069ea736 436 return NULL;
48bb3750 437 }
069ea736 438 return ct_str;
439}
440
441/* Test if a constant matches the constraint. */
f6c6afc1 442static int tcg_target_const_match(tcg_target_long val, TCGType type,
48bb3750 443 const TCGArgConstraint *arg_ct)
2827822e 444{
445 int ct = arg_ct->ct;
446
447 if (ct & TCG_CT_CONST) {
448 return 1;
449 }
450
671c835b 451 if (type == TCG_TYPE_I32) {
452 val = (int32_t)val;
453 }
454
455 /* The following are mutually exclusive. */
456 if (ct & TCG_CT_CONST_S16) {
457 return val == (int16_t)val;
458 } else if (ct & TCG_CT_CONST_S32) {
459 return val == (int32_t)val;
460 } else if (ct & TCG_CT_CONST_S33) {
461 return val >= -0xffffffffll && val <= 0xffffffffll;
462 } else if (ct & TCG_CT_CONST_NN16) {
463 return !(val < 0 && val == (int16_t)val);
464 } else if (ct & TCG_CT_CONST_NN32) {
465 return !(val < 0 && val == (int32_t)val);
466 } else if (ct & TCG_CT_CONST_U31) {
467 return val >= 0 && val <= 0x7fffffff;
468 } else if (ct & TCG_CT_CONST_ZERO) {
469 return val == 0;
470 }
471
472 return 0;
473}
474
475/* Emit instructions according to the given instruction format. */
476
477static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
478{
479 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
480}
481
482static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
483 TCGReg r1, TCGReg r2)
484{
485 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
486}
487
488static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
489 TCGReg r1, TCGReg r2, int m3)
490{
491 tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
492}
493
494static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
495{
496 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
497}
498
499static void tcg_out_insn_RIE(TCGContext *s, S390Opcode op, TCGReg r1,
500 int i2, int m3)
501{
502 tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
503 tcg_out32(s, (i2 << 16) | (op & 0xff));
504}
505
506static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
507{
508 tcg_out16(s, op | (r1 << 4));
509 tcg_out32(s, i2);
510}
511
512static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
513 TCGReg b2, TCGReg r3, int disp)
514{
515 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
516 | (disp & 0xfff));
517}
518
519static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
520 TCGReg b2, TCGReg r3, int disp)
521{
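    /* The RSY displacement is a 20-bit signed value, split below into the
       low 12-bit DL field and the high 8-bit DH field. */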
522 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
523 tcg_out32(s, (op & 0xff) | (b2 << 28)
524 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
525}
526
527#define tcg_out_insn_RX tcg_out_insn_RS
528#define tcg_out_insn_RXY tcg_out_insn_RSY
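/* The RX and RXY formats lay out their fields exactly like RS and RSY, with
   the index register in the position of R3, so the RS/RSY emitters above can
   be reused directly. */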
529
530/* Emit an opcode with "type-checking" of the format. */
531#define tcg_out_insn(S, FMT, OP, ...) \
532 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
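/* For example, tcg_out_insn(s, RIL, LARL, ret, off) expands to
   tcg_out_insn_RIL(s, RIL_LARL, ret, off); naming an opcode that does not
   exist in the requested format fails to compile. */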
533
534
535/* emit 64-bit shifts */
536static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
537 TCGReg src, TCGReg sh_reg, int sh_imm)
538{
539 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
540}
541
542/* emit 32-bit shifts */
543static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
544 TCGReg sh_reg, int sh_imm)
545{
546 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
547}
548
549static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
550{
551 if (src != dst) {
552 if (type == TCG_TYPE_I32) {
553 tcg_out_insn(s, RR, LR, dst, src);
554 } else {
555 tcg_out_insn(s, RRE, LGR, dst, src);
556 }
557 }
558}
559
2827822e 560/* load a register with an immediate value */
561static void tcg_out_movi(TCGContext *s, TCGType type,
562 TCGReg ret, tcg_target_long sval)
2827822e 563{
564 static const S390Opcode lli_insns[4] = {
565 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
566 };
567
568 tcg_target_ulong uval = sval;
569 int i;
570
571 if (type == TCG_TYPE_I32) {
572 uval = (uint32_t)sval;
573 sval = (int32_t)sval;
574 }
575
576 /* Try all 32-bit insns that can load it in one go. */
577 if (sval >= -0x8000 && sval < 0x8000) {
578 tcg_out_insn(s, RI, LGHI, ret, sval);
579 return;
580 }
581
582 for (i = 0; i < 4; i++) {
583 tcg_target_long mask = 0xffffull << i*16;
584 if ((uval & mask) == uval) {
585 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
586 return;
587 }
588 }
589
590 /* Try all 48-bit insns that can load it in one go. */
b2c98d9d 591 if (s390_facilities & FACILITY_EXT_IMM) {
592 if (sval == (int32_t)sval) {
593 tcg_out_insn(s, RIL, LGFI, ret, sval);
594 return;
595 }
596 if (uval <= 0xffffffff) {
597 tcg_out_insn(s, RIL, LLILF, ret, uval);
598 return;
599 }
600 if ((uval & 0xffffffff) == 0) {
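        /* Note: "uval >> 31 >> 1" is simply uval >> 32, presumably split so
           that no single shift count can reach the operand width. */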
601 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
602 return;
603 }
604 }
605
606 /* Try for PC-relative address load. */
607 if ((sval & 1) == 0) {
8c081b18 608 ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
609 if (off == (int32_t)off) {
610 tcg_out_insn(s, RIL, LARL, ret, off);
611 return;
612 }
613 }
614
615 /* If extended immediates are not present, then we may have to issue
616 several instructions to load the low 32 bits. */
b2c98d9d 617 if (!(s390_facilities & FACILITY_EXT_IMM)) {
618 /* A 32-bit unsigned value can be loaded in 2 insns. And given
619 that the lli_insns loop above did not succeed, we know that
620 both insns are required. */
621 if (uval <= 0xffffffff) {
622 tcg_out_insn(s, RI, LLILL, ret, uval);
623 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
624 return;
625 }
626
627 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
628 We first want to make sure that all the high bits get set. With
629 luck the low 16-bits can be considered negative to perform that for
630 free, otherwise we load an explicit -1. */
631 if (sval >> 31 >> 1 == -1) {
632 if (uval & 0x8000) {
633 tcg_out_insn(s, RI, LGHI, ret, uval);
634 } else {
635 tcg_out_insn(s, RI, LGHI, ret, -1);
636 tcg_out_insn(s, RI, IILL, ret, uval);
637 }
638 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
639 return;
640 }
641 }
642
643 /* If we get here, both the high and low parts have non-zero bits. */
644
645 /* Recurse to load the lower 32-bits. */
a22971f9 646 tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);
647
648 /* Insert data into the high 32-bits. */
649 uval = uval >> 31 >> 1;
b2c98d9d 650 if (s390_facilities & FACILITY_EXT_IMM) {
651 if (uval < 0x10000) {
652 tcg_out_insn(s, RI, IIHL, ret, uval);
653 } else if ((uval & 0xffff) == 0) {
654 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
655 } else {
656 tcg_out_insn(s, RIL, IIHF, ret, uval);
657 }
658 } else {
659 if (uval & 0xffff) {
660 tcg_out_insn(s, RI, IIHL, ret, uval);
661 }
662 if (uval & 0xffff0000) {
663 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
664 }
665 }
666}
667
668
669/* Emit a load/store type instruction. Inputs are:
670 DATA: The register to be loaded or stored.
671 BASE+OFS: The effective address.
672 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
673 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
674
675static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
676 TCGReg data, TCGReg base, TCGReg index,
677 tcg_target_long ofs)
678{
679 if (ofs < -0x80000 || ofs >= 0x80000) {
680 /* Combine the low 20 bits of the offset with the actual load insn;
681 the high 44 bits must come from an immediate load. */
682 tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
683 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
684 ofs = low;
685
686 /* If we were already given an index register, add it in. */
687 if (index != TCG_REG_NONE) {
688 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
689 }
690 index = TCG_TMP0;
691 }
692
693 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
694 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
695 } else {
696 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
697 }
698}
699
48bb3750 700
2827822e 701/* load data without address translation or endianness conversion */
48bb3750 702static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 703 TCGReg base, intptr_t ofs)
2827822e 704{
705 if (type == TCG_TYPE_I32) {
706 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
707 } else {
708 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
709 }
710}
711
48bb3750 712static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 713 TCGReg base, intptr_t ofs)
2827822e 714{
715 if (type == TCG_TYPE_I32) {
716 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
717 } else {
718 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
719 }
720}
721
722static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
723 TCGReg base, intptr_t ofs)
724{
725 return false;
726}
727
728/* load data from an absolute host address */
729static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
730{
8c081b18 731 intptr_t addr = (intptr_t)abs;
48bb3750 732
b2c98d9d 733 if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
8c081b18 734 ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
735 if (disp == (int32_t)disp) {
736 if (type == TCG_TYPE_I32) {
737 tcg_out_insn(s, RIL, LRL, dest, disp);
738 } else {
739 tcg_out_insn(s, RIL, LGRL, dest, disp);
740 }
741 return;
742 }
743 }
744
745 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
746 tcg_out_ld(s, type, dest, dest, addr & 0xffff);
747}
748
749static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
750 int msb, int lsb, int ofs, int z)
751{
752 /* Format RIE-f */
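    /* I3 holds the first bit of the selected field, I4 the last bit (its 0x80
       bit is the "zero remaining bits" flag), and I5 the rotate count applied
       to SRC before insertion. */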
753 tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
754 tcg_out16(s, (msb << 8) | (z << 7) | lsb);
755 tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
756}
757
758static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
759{
b2c98d9d 760 if (s390_facilities & FACILITY_EXT_IMM) {
761 tcg_out_insn(s, RRE, LGBR, dest, src);
762 return;
763 }
764
765 if (type == TCG_TYPE_I32) {
766 if (dest == src) {
767 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
768 } else {
769 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
770 }
771 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
772 } else {
773 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
774 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
775 }
776}
777
778static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
779{
b2c98d9d 780 if (s390_facilities & FACILITY_EXT_IMM) {
781 tcg_out_insn(s, RRE, LLGCR, dest, src);
782 return;
783 }
784
785 if (dest == src) {
786 tcg_out_movi(s, type, TCG_TMP0, 0xff);
787 src = TCG_TMP0;
788 } else {
789 tcg_out_movi(s, type, dest, 0xff);
790 }
791 if (type == TCG_TYPE_I32) {
792 tcg_out_insn(s, RR, NR, dest, src);
793 } else {
794 tcg_out_insn(s, RRE, NGR, dest, src);
795 }
796}
797
798static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
799{
b2c98d9d 800 if (s390_facilities & FACILITY_EXT_IMM) {
801 tcg_out_insn(s, RRE, LGHR, dest, src);
802 return;
803 }
804
805 if (type == TCG_TYPE_I32) {
806 if (dest == src) {
807 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
808 } else {
809 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
810 }
811 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
812 } else {
813 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
814 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
815 }
816}
817
818static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
819{
b2c98d9d 820 if (s390_facilities & FACILITY_EXT_IMM) {
821 tcg_out_insn(s, RRE, LLGHR, dest, src);
822 return;
823 }
824
825 if (dest == src) {
826 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
827 src = TCG_TMP0;
828 } else {
829 tcg_out_movi(s, type, dest, 0xffff);
830 }
831 if (type == TCG_TYPE_I32) {
832 tcg_out_insn(s, RR, NR, dest, src);
833 } else {
834 tcg_out_insn(s, RRE, NGR, dest, src);
835 }
836}
837
838static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
839{
840 tcg_out_insn(s, RRE, LGFR, dest, src);
841}
842
843static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
844{
845 tcg_out_insn(s, RRE, LLGFR, dest, src);
846}
847
848/* Accept bit patterns like these:
849 0....01....1
850 1....10....0
851 1..10..01..1
852 0..01..10..0
853 Copied from gcc sources. */
854static inline bool risbg_mask(uint64_t c)
855{
856 uint64_t lsb;
857 /* We don't change the number of transitions by inverting,
858 so make sure we start with the LSB zero. */
859 if (c & 1) {
860 c = ~c;
861 }
862 /* Reject all zeros or all ones. */
863 if (c == 0) {
864 return false;
865 }
866 /* Find the first transition. */
867 lsb = c & -c;
868 /* Invert to look for a second transition. */
869 c = ~c;
870 /* Erase the first transition. */
871 c &= -lsb;
872 /* Find the second transition, if any. */
873 lsb = c & -c;
874 /* Match if all the bits are 1's, or if c is zero. */
875 return c == -lsb;
876}
877
878static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
879{
880 int msb, lsb;
881 if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
882 /* Achieve wraparound by swapping msb and lsb. */
883 msb = 64 - ctz64(~val);
884 lsb = clz64(~val) - 1;
885 } else {
886 msb = clz64(val);
887 lsb = 63 - ctz64(val);
888 }
889 tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
890}
891
07ff7983 892static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
893{
894 static const S390Opcode ni_insns[4] = {
895 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
896 };
897 static const S390Opcode nif_insns[2] = {
898 RIL_NILF, RIL_NIHF
899 };
07ff7983 900 uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
901 int i;
902
48bb3750 903 /* Look for the zero-extensions. */
07ff7983 904 if ((val & valid) == 0xffffffff) {
905 tgen_ext32u(s, dest, dest);
906 return;
907 }
b2c98d9d 908 if (s390_facilities & FACILITY_EXT_IMM) {
07ff7983 909 if ((val & valid) == 0xff) {
910 tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
911 return;
912 }
07ff7983 913 if ((val & valid) == 0xffff) {
914 tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
915 return;
916 }
07ff7983 917 }
48bb3750 918
919 /* Try all 32-bit insns that can perform it in one go. */
920 for (i = 0; i < 4; i++) {
921 tcg_target_ulong mask = ~(0xffffull << i*16);
922 if (((val | ~valid) & mask) == mask) {
923 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
924 return;
48bb3750 925 }
07ff7983 926 }
48bb3750 927
07ff7983 928 /* Try all 48-bit insns that can perform it in one go. */
b2c98d9d 929 if (s390_facilities & FACILITY_EXT_IMM) {
930 for (i = 0; i < 2; i++) {
931 tcg_target_ulong mask = ~(0xffffffffull << i*32);
932 if (((val | ~valid) & mask) == mask) {
933 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
934 return;
935 }
936 }
07ff7983 937 }
b2c98d9d 938 if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
547ec121 939 tgen_andi_risbg(s, dest, dest, val);
940 return;
941 }
48bb3750 942
943 /* Fall back to loading the constant. */
944 tcg_out_movi(s, type, TCG_TMP0, val);
945 if (type == TCG_TYPE_I32) {
946 tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
48bb3750 947 } else {
07ff7983 948 tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
949 }
950}
951
952static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
953{
954 static const S390Opcode oi_insns[4] = {
955 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
956 };
957 static const S390Opcode nif_insns[2] = {
958 RIL_OILF, RIL_OIHF
959 };
960
961 int i;
962
963 /* Look for no-op. */
964 if (val == 0) {
965 return;
966 }
967
b2c98d9d 968 if (s390_facilities & FACILITY_EXT_IMM) {
969 /* Try all 32-bit insns that can perform it in one go. */
970 for (i = 0; i < 4; i++) {
971 tcg_target_ulong mask = (0xffffull << i*16);
972 if ((val & mask) != 0 && (val & ~mask) == 0) {
973 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
974 return;
975 }
976 }
977
978 /* Try all 48-bit insns that can perform it in one go. */
979 for (i = 0; i < 2; i++) {
980 tcg_target_ulong mask = (0xffffffffull << i*32);
981 if ((val & mask) != 0 && (val & ~mask) == 0) {
982 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
983 return;
984 }
985 }
986
987 /* Perform the OR via sequential modifications to the high and
988 low parts. Do this via recursion to handle 16-bit vs 32-bit
989 masks in each half. */
990 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
991 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
992 } else {
993 /* With no extended-immediate facility, we don't need to be so
994 clever. Just iterate over the insns and mask in the constant. */
995 for (i = 0; i < 4; i++) {
996 tcg_target_ulong mask = (0xffffull << i*16);
997 if ((val & mask) != 0) {
998 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
999 }
1000 }
1001 }
1002}
1003
1004static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1005{
1006 /* Perform the xor by parts. */
1007 if (val & 0xffffffff) {
1008 tcg_out_insn(s, RIL, XILF, dest, val);
1009 }
1010 if (val > 0xffffffff) {
1011 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1012 }
1013}
1014
1015static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
65839b56 1016 TCGArg c2, bool c2const, bool need_carry)
48bb3750 1017{
bcc66562 1018 bool is_unsigned = is_unsigned_cond(c);
1019 if (c2const) {
1020 if (c2 == 0) {
1021 if (!(is_unsigned && need_carry)) {
1022 if (type == TCG_TYPE_I32) {
1023 tcg_out_insn(s, RR, LTR, r1, r1);
1024 } else {
1025 tcg_out_insn(s, RRE, LTGR, r1, r1);
1026 }
1027 return tcg_cond_to_ltr_cond[c];
1028 }
1029 /* If we only got here because of load-and-test,
1030 and we couldn't use that, then we need to load
1031 the constant into a register. */
a32b6ae8 1032 if (!(s390_facilities & FACILITY_EXT_IMM)) {
1033 c2 = TCG_TMP0;
1034 tcg_out_movi(s, type, c2, 0);
1035 goto do_reg;
1036 }
1037 }
1038 if (is_unsigned) {
48bb3750 1039 if (type == TCG_TYPE_I32) {
65839b56 1040 tcg_out_insn(s, RIL, CLFI, r1, c2);
48bb3750 1041 } else {
65839b56 1042 tcg_out_insn(s, RIL, CLGFI, r1, c2);
48bb3750 1043 }
48bb3750 1044 } else {
1045 if (type == TCG_TYPE_I32) {
1046 tcg_out_insn(s, RIL, CFI, r1, c2);
48bb3750 1047 } else {
65839b56 1048 tcg_out_insn(s, RIL, CGFI, r1, c2);
1049 }
1050 }
1051 } else {
65839b56 1052 do_reg:
1053 if (is_unsigned) {
1054 if (type == TCG_TYPE_I32) {
1055 tcg_out_insn(s, RR, CLR, r1, c2);
1056 } else {
1057 tcg_out_insn(s, RRE, CLGR, r1, c2);
1058 }
1059 } else {
1060 if (type == TCG_TYPE_I32) {
1061 tcg_out_insn(s, RR, CR, r1, c2);
1062 } else {
1063 tcg_out_insn(s, RRE, CGR, r1, c2);
1064 }
1065 }
1066 }
1067 return tcg_cond_to_s390_cond[c];
1068}
1069
7b7066b1 1070static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
96a9f093 1071 TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
48bb3750 1072{
7b7066b1 1073 int cc;
7af525af 1074 bool have_loc;
7b7066b1 1075
1076 /* With LOC2, we can always emit the minimum 3 insns. */
1077 if (s390_facilities & FACILITY_LOAD_ON_COND2) {
1078 /* Emit: d = 0, d = (cc ? 1 : d). */
1079 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
1080 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1081 tcg_out_insn(s, RIE, LOCGHI, dest, 1, cc);
1082 return;
1083 }
1084
1085 have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0;
1086
1087 /* For HAVE_LOC, only the path through do_greater is smaller. */
1088 switch (cond) {
1089 case TCG_COND_GTU:
1090 case TCG_COND_GT:
1091 do_greater:
1092 /* The result of a compare has CC=2 for GT and CC=3 unused.
1093 ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */
65839b56 1094 tgen_cmp(s, type, cond, c1, c2, c2const, true);
1095 tcg_out_movi(s, type, dest, 0);
1096 tcg_out_insn(s, RRE, ALCGR, dest, dest);
1097 return;
1098
1099 case TCG_COND_GEU:
1100 if (have_loc) {
1101 goto do_loc;
1102 }
1103 do_geu:
1104 /* We need "real" carry semantics, so use SUBTRACT LOGICAL
c2097136 1105 instead of COMPARE LOGICAL. This may need an extra move. */
7b7066b1 1106 if (c2const) {
c2097136 1107 tcg_out_mov(s, type, TCG_TMP0, c1);
1108 if (type == TCG_TYPE_I32) {
1109 tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2);
1110 } else {
1111 tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2);
1112 }
1113 } else if (s390_facilities & FACILITY_DISTINCT_OPS) {
1114 if (type == TCG_TYPE_I32) {
1115 tcg_out_insn(s, RRF, SLRK, TCG_TMP0, c1, c2);
1116 } else {
1117 tcg_out_insn(s, RRF, SLGRK, TCG_TMP0, c1, c2);
1118 }
7b7066b1 1119 } else {
c2097136 1120 tcg_out_mov(s, type, TCG_TMP0, c1);
1121 if (type == TCG_TYPE_I32) {
1122 tcg_out_insn(s, RR, SLR, TCG_TMP0, c2);
1123 } else {
1124 tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2);
1125 }
7b7066b1 1126 }
c2097136 1127 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1128 tcg_out_insn(s, RRE, ALCGR, dest, dest);
1129 return;
1130
1131 case TCG_COND_LEU:
1132 if (have_loc) {
1133 goto do_loc;
1134 }
1135 /* fallthru */
1136 case TCG_COND_LTU:
1137 case TCG_COND_LT:
1138 /* Swap operands so that we can use GEU/GTU/GT. */
1139 if (c2const) {
1140 if (have_loc) {
1141 goto do_loc;
1142 }
1143 tcg_out_movi(s, type, TCG_TMP0, c2);
1144 c2 = c1;
1145 c2const = 0;
1146 c1 = TCG_TMP0;
1147 } else {
1148 TCGReg t = c1;
1149 c1 = c2;
1150 c2 = t;
1151 }
1152 if (cond == TCG_COND_LEU) {
1153 goto do_geu;
1154 }
1155 cond = tcg_swap_cond(cond);
1156 goto do_greater;
1157
1158 case TCG_COND_NE:
1159 /* X != 0 is X > 0. */
1160 if (c2const && c2 == 0) {
1161 cond = TCG_COND_GTU;
1162 goto do_greater;
1163 }
1164 break;
1165
1166 case TCG_COND_EQ:
1167 if (have_loc) {
1168 goto do_loc;
1169 }
1170 /* X == 0 is X <= 0 is 0 >= X. */
1171 if (c2const && c2 == 0) {
1172 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0);
1173 c2 = c1;
1174 c2const = 0;
1175 c1 = TCG_TMP0;
1176 goto do_geu;
1177 }
1178 break;
48bb3750 1179
1180 default:
1181 break;
1182 }
1183
65839b56 1184 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
1185 /* Emit: d = 1; if (cc) goto over; d = 0; over: */
1186 tcg_out_movi(s, type, dest, 1);
1187 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
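    /* The RI-format branch offset is counted in halfwords from the BRC
       itself: 4 bytes of BRC plus the following 4-byte load of zero. */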
1188 tcg_out_movi(s, type, dest, 0);
1189 return;
1190
1191 do_loc:
1192 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
1193 /* Emit: d = 0, t = 1, d = (cc ? t : d). */
1194 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1195 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
1196 tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
1197}
1198
96a9f093 1199static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1200 TCGReg c1, TCGArg c2, int c2const,
1201 TCGArg v3, int v3const)
1202{
1203 int cc;
b2c98d9d 1204 if (s390_facilities & FACILITY_LOAD_ON_COND) {
65839b56 1205 cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
1206 if (v3const) {
1207 tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc);
1208 } else {
1209 tcg_out_insn(s, RRF, LOCGR, dest, v3, cc);
1210 }
1211 } else {
1212 c = tcg_invert_cond(c);
65839b56 1213 cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
1214
1215 /* Emit: if (cc) goto over; dest = r3; over: */
1216 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
7af525af 1217 tcg_out_insn(s, RRE, LGR, dest, v3);
1218 }
1219}
1220
1221static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
1222 TCGArg a2, int a2const)
1223{
1224 /* Since this sets both R and R+1, we have no choice but to store the
1225 result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */
1226 QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
1227 tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);
1228
1229 if (a2const && a2 == 64) {
1230 tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
1231 } else {
1232 if (a2const) {
1233 tcg_out_movi(s, TCG_TYPE_I64, dest, a2);
1234 } else {
1235 tcg_out_mov(s, TCG_TYPE_I64, dest, a2);
1236 }
1237 if (s390_facilities & FACILITY_LOAD_ON_COND) {
1238 /* Emit: if (one bit found) dest = r0. */
1239 tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2);
1240 } else {
1241 /* Emit: if (no one bit found) goto over; dest = r0; over: */
1242 tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1);
1243 tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0);
1244 }
1245 }
1246}
1247
d5690ea4 1248static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
752b1be9 1249 int ofs, int len, int z)
1250{
1251 int lsb = (63 - ofs);
1252 int msb = lsb - (len - 1);
752b1be9 1253 tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
1254}
1255
1256static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
1257 int ofs, int len)
1258{
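    /* An unsigned field extract: rotate the field down to bit 0 and clear all
       higher bits. */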
1259 tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
1260}
1261
8c081b18 1262static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
48bb3750 1263{
1264 ptrdiff_t off = dest - s->code_ptr;
1265 if (off == (int16_t)off) {
1266 tcg_out_insn(s, RI, BRC, cc, off);
1267 } else if (off == (int32_t)off) {
1268 tcg_out_insn(s, RIL, BRCL, cc, off);
1269 } else {
8c081b18 1270 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1271 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1272 }
1273}
1274
bec16311 1275static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
48bb3750 1276{
48bb3750 1277 if (l->has_value) {
8c081b18 1278 tgen_gotoi(s, cc, l->u.value_ptr);
1279 } else if (USE_LONG_BRANCHES) {
1280 tcg_out16(s, RIL_BRCL | (cc << 4));
bec16311 1281 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, -2);
8c081b18 1282 s->code_ptr += 2;
1283 } else {
1284 tcg_out16(s, RI_BRC | (cc << 4));
bec16311 1285 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, -2);
8c081b18 1286 s->code_ptr += 1;
1287 }
1288}
1289
1290static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
bec16311 1291 TCGReg r1, TCGReg r2, TCGLabel *l)
48bb3750 1292{
8c081b18 1293 intptr_t off;
1294
1295 if (l->has_value) {
8c081b18 1296 off = l->u.value_ptr - s->code_ptr;
1297 } else {
1298 /* We need to keep the offset unchanged for retranslation. */
8c081b18 1299 off = s->code_ptr[1];
bec16311 1300 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
1301 }
1302
1303 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1304 tcg_out16(s, off);
1305 tcg_out16(s, cc << 12 | (opc & 0xff));
1306}
1307
1308static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
bec16311 1309 TCGReg r1, int i2, TCGLabel *l)
48bb3750 1310{
1311 tcg_target_long off;
1312
1313 if (l->has_value) {
8c081b18 1314 off = l->u.value_ptr - s->code_ptr;
1315 } else {
1316 /* We need to keep the offset unchanged for retranslation. */
8c081b18 1317 off = s->code_ptr[1];
bec16311 1318 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
1319 }
1320
1321 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1322 tcg_out16(s, off);
1323 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1324}
1325
1326static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
bec16311 1327 TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
1328{
1329 int cc;
1330
b2c98d9d 1331 if (s390_facilities & FACILITY_GEN_INST_EXT) {
b879f308 1332 bool is_unsigned = is_unsigned_cond(c);
1333 bool in_range;
1334 S390Opcode opc;
1335
1336 cc = tcg_cond_to_s390_cond[c];
1337
1338 if (!c2const) {
1339 opc = (type == TCG_TYPE_I32
1340 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1341 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
bec16311 1342 tgen_compare_branch(s, opc, cc, r1, c2, l);
1343 return;
1344 }
1345
1346 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1347 If the immediate we've been given does not fit that range, we'll
1348 fall back to separate compare and branch instructions using the
1349 larger comparison range afforded by COMPARE IMMEDIATE. */
1350 if (type == TCG_TYPE_I32) {
1351 if (is_unsigned) {
1352 opc = RIE_CLIJ;
1353 in_range = (uint32_t)c2 == (uint8_t)c2;
1354 } else {
1355 opc = RIE_CIJ;
1356 in_range = (int32_t)c2 == (int8_t)c2;
1357 }
1358 } else {
1359 if (is_unsigned) {
1360 opc = RIE_CLGIJ;
1361 in_range = (uint64_t)c2 == (uint8_t)c2;
1362 } else {
1363 opc = RIE_CGIJ;
1364 in_range = (int64_t)c2 == (int8_t)c2;
1365 }
1366 }
1367 if (in_range) {
bec16311 1368 tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
1369 return;
1370 }
1371 }
1372
65839b56 1373 cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
bec16311 1374 tgen_branch(s, cc, l);
1375}
1376
a8111212 1377static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
48bb3750 1378{
8c081b18 1379 ptrdiff_t off = dest - s->code_ptr;
1380 if (off == (int32_t)off) {
1381 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1382 } else {
8c081b18 1383 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1384 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1385 }
1386}
1387
a5a04f28 1388static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
1389 TCGReg base, TCGReg index, int disp)
1390{
3c8691f5 1391 switch (opc & (MO_SSIZE | MO_BSWAP)) {
a5a04f28 1392 case MO_UB:
1393 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1394 break;
a5a04f28 1395 case MO_SB:
1396 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1397 break;
1398
1399 case MO_UW | MO_BSWAP:
1400 /* swapped unsigned halfword load with upper bits zeroed */
1401 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1402 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1403 break;
a5a04f28 1404 case MO_UW:
1405 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1406 break;
1407
1408 case MO_SW | MO_BSWAP:
1409 /* swapped sign-extended halfword load */
1410 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1411 tgen_ext16s(s, TCG_TYPE_I64, data, data);
48bb3750 1412 break;
a5a04f28 1413 case MO_SW:
1414 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1415 break;
1416
1417 case MO_UL | MO_BSWAP:
1418 /* swapped unsigned int load with upper bits zeroed */
1419 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1420 tgen_ext32u(s, data, data);
48bb3750 1421 break;
a5a04f28 1422 case MO_UL:
1423 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1424 break;
1425
1426 case MO_SL | MO_BSWAP:
1427 /* swapped sign-extended int load */
1428 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1429 tgen_ext32s(s, data, data);
48bb3750 1430 break;
a5a04f28 1431 case MO_SL:
1432 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1433 break;
1434
1435 case MO_Q | MO_BSWAP:
1436 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
48bb3750 1437 break;
a5a04f28 1438 case MO_Q:
b8dd88b8 1439 tcg_out_insn(s, RXY, LG, data, base, index, disp);
48bb3750 1440 break;
b8dd88b8 1441
1442 default:
1443 tcg_abort();
1444 }
1445}
1446
a5a04f28 1447static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
1448 TCGReg base, TCGReg index, int disp)
1449{
3c8691f5 1450 switch (opc & (MO_SIZE | MO_BSWAP)) {
a5a04f28 1451 case MO_UB:
1452 if (disp >= 0 && disp < 0x1000) {
1453 tcg_out_insn(s, RX, STC, data, base, index, disp);
1454 } else {
1455 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1456 }
1457 break;
1458
1459 case MO_UW | MO_BSWAP:
1460 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1461 break;
a5a04f28 1462 case MO_UW:
b8dd88b8 1463 if (disp >= 0 && disp < 0x1000) {
1464 tcg_out_insn(s, RX, STH, data, base, index, disp);
1465 } else {
1466 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1467 }
1468 break;
1469
1470 case MO_UL | MO_BSWAP:
1471 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1472 break;
a5a04f28 1473 case MO_UL:
b8dd88b8 1474 if (disp >= 0 && disp < 0x1000) {
1475 tcg_out_insn(s, RX, ST, data, base, index, disp);
1476 } else {
1477 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1478 }
1479 break;
1480
1481 case MO_Q | MO_BSWAP:
1482 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1483 break;
a5a04f28 1484 case MO_Q:
b8dd88b8 1485 tcg_out_insn(s, RXY, STG, data, base, index, disp);
48bb3750 1486 break;
b8dd88b8 1487
1488 default:
1489 tcg_abort();
1490 }
1491}
1492
1493#if defined(CONFIG_SOFTMMU)
1494/* We're expecting to use a 20-bit signed offset on the tlb memory ops.
1495 Using the offset of the second entry in the last tlb table ensures
1496 that we can index all of the elements of the first entry. */
1497QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
1498 > 0x7ffff);
1499
1500/* Load and compare a TLB entry, leaving the flags set. Loads the TLB
1501 addend into R2. Returns a register with the sanitized guest address. */
1502static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
1503 int mem_index, bool is_ld)
48bb3750 1504{
1505 unsigned s_bits = opc & MO_SIZE;
1506 unsigned a_bits = get_alignment_bits(opc);
1507 unsigned s_mask = (1 << s_bits) - 1;
1508 unsigned a_mask = (1 << a_bits) - 1;
1509 int ofs, a_off;
1510 uint64_t tlb_mask;
1511
1512 /* For aligned accesses, we check the first byte and include the alignment
1513 bits within the address. For unaligned access, we check that we don't
1514 cross pages using the address of the last byte of the access. */
1515 a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
1516 tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
fb596415 1517
b2c98d9d 1518 if (s390_facilities & FACILITY_GEN_INST_EXT) {
1519 tcg_out_risbg(s, TCG_REG_R2, addr_reg,
1520 64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
1521 63 - CPU_TLB_ENTRY_BITS,
1522 64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
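        /* The RISBG above yields the scaled TLB index, i.e.
           ((addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1)) << CPU_TLB_ENTRY_BITS,
           in R2. */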
1523 if (a_off) {
1524 tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
1525 tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
1526 } else {
1527 tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
1528 }
48bb3750 1529 } else {
1530 tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
1531 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
a5e39810 1532 tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
1533 tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
1534 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
1535 tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
1536 }
1537
fb596415 1538 if (is_ld) {
9349b4f9 1539 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
1540 } else {
1541 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
48bb3750 1542 }
48bb3750 1543 if (TARGET_LONG_BITS == 32) {
fb596415 1544 tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
48bb3750 1545 } else {
fb596415 1546 tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
1547 }
1548
1549 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1550 tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);
1551
48bb3750 1552 if (TARGET_LONG_BITS == 32) {
1553 tgen_ext32u(s, TCG_REG_R3, addr_reg);
1554 return TCG_REG_R3;
48bb3750 1555 }
1556 return addr_reg;
1557}
48bb3750 1558
1559static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1560 TCGReg data, TCGReg addr,
1561 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
1562{
1563 TCGLabelQemuLdst *label = new_ldst_label(s);
1564
1565 label->is_ld = is_ld;
3972ef6f 1566 label->oi = oi;
1567 label->datalo_reg = data;
1568 label->addrlo_reg = addr;
1569 label->raddr = raddr;
1570 label->label_ptr[0] = label_ptr;
1571}
48bb3750 1572
1573static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1574{
1575 TCGReg addr_reg = lb->addrlo_reg;
1576 TCGReg data_reg = lb->datalo_reg;
1577 TCGMemOpIdx oi = lb->oi;
1578 TCGMemOp opc = get_memop(oi);
48bb3750 1579
fb596415 1580 patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);
48bb3750 1581
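    /* Marshal the helper call: R2 = env, R3 = guest address, R4 = oi,
       R5 = return address, matching the helper_*_mmu(env, addr, oi, retaddr)
       argument order. */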
1582 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1583 if (TARGET_LONG_BITS == 64) {
1584 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1585 }
3972ef6f 1586 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
fb596415 1587 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
2b7ec66f 1588 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
fb596415 1589 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
65a62a75 1590
fb596415 1591 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1592}
1593
fb596415 1594static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
48bb3750 1595{
1596 TCGReg addr_reg = lb->addrlo_reg;
1597 TCGReg data_reg = lb->datalo_reg;
1598 TCGMemOpIdx oi = lb->oi;
1599 TCGMemOp opc = get_memop(oi);
1600
1601 patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);
1602
1603 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1604 if (TARGET_LONG_BITS == 64) {
1605 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1606 }
1607 switch (opc & MO_SIZE) {
1608 case MO_UB:
1609 tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1610 break;
1611 case MO_UW:
1612 tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1613 break;
1614 case MO_UL:
1615 tgen_ext32u(s, TCG_REG_R4, data_reg);
1616 break;
1617 case MO_Q:
1618 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1619 break;
1620 default:
1621 tcg_abort();
1622 }
3972ef6f 1623 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
fb596415 1624 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
2b7ec66f 1625 tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1626
1627 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1628}
1629#else
1630static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1631 TCGReg *index_reg, tcg_target_long *disp)
1632{
1633 if (TARGET_LONG_BITS == 32) {
1634 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1635 *addr_reg = TCG_TMP0;
1636 }
b76f21a7 1637 if (guest_base < 0x80000) {
48bb3750 1638 *index_reg = TCG_REG_NONE;
b76f21a7 1639 *disp = guest_base;
1640 } else {
1641 *index_reg = TCG_GUEST_BASE_REG;
1642 *disp = 0;
1643 }
1644}
1645#endif /* CONFIG_SOFTMMU */
1646
f24efee4 1647static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
59227d5d 1648 TCGMemOpIdx oi)
48bb3750 1649{
59227d5d 1650 TCGMemOp opc = get_memop(oi);
fb596415 1651#ifdef CONFIG_SOFTMMU
59227d5d 1652 unsigned mem_index = get_mmuidx(oi);
1653 tcg_insn_unit *label_ptr;
1654 TCGReg base_reg;
1655
1656 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
1657
1658 /* We need to keep the offset unchanged for retranslation. */
1659 tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1660 label_ptr = s->code_ptr;
1661 s->code_ptr += 1;
1662
1663 tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
48bb3750 1664
3972ef6f 1665 add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
48bb3750 1666#else
1667 TCGReg index_reg;
1668 tcg_target_long disp;
1669
1670 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1671 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1672#endif
1673}
1674
f24efee4 1675static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
59227d5d 1676 TCGMemOpIdx oi)
48bb3750 1677{
59227d5d 1678 TCGMemOp opc = get_memop(oi);
fb596415 1679#ifdef CONFIG_SOFTMMU
59227d5d 1680 unsigned mem_index = get_mmuidx(oi);
1681 tcg_insn_unit *label_ptr;
1682 TCGReg base_reg;
1683
1684 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
1685
1686 /* We need to keep the offset unchanged for retranslation. */
1687 tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1688 label_ptr = s->code_ptr;
1689 s->code_ptr += 1;
1690
1691 tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
48bb3750 1692
3972ef6f 1693 add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
48bb3750 1694#else
1695 TCGReg index_reg;
1696 tcg_target_long disp;
1697
1698 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1699 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1700#endif
1701}
1702
1703# define OP_32_64(x) \
1704 case glue(glue(INDEX_op_,x),_i32): \
1705 case glue(glue(INDEX_op_,x),_i64)
48bb3750 1706
a9751609 1707static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1708 const TCGArg *args, const int *const_args)
1709{
c2097136 1710 S390Opcode op, op2;
0db921e6 1711 TCGArg a0, a1, a2;
1712
1713 switch (opc) {
1714 case INDEX_op_exit_tb:
1715 /* Reuse the zeroing that exists for goto_ptr. */
1716 a0 = args[0];
1717 if (a0 == 0) {
1718 tgen_gotoi(s, S390_CC_ALWAYS, s->code_gen_epilogue);
1719 } else {
1720 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
1721 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1722 }
1723 break;
1724
1725 case INDEX_op_goto_tb:
f309101c 1726 if (s->tb_jmp_insn_offset) {
1727 /* branch displacement must be aligned for atomic patching;
1728 * see if we need to add an extra nop before the branch
1729 */
1730 if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1731 tcg_out16(s, NOP);
1732 }
a10c64e0 1733 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
f309101c 1734 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
a10c64e0 1735 s->code_ptr += 2;
48bb3750 1736 } else {
1737 /* load address stored at s->tb_jmp_target_addr + args[0] */
1738 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0,
1739 s->tb_jmp_target_addr + args[0]);
1740 /* and go there */
1741 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1742 }
f309101c 1743 s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
1744 break;
1745
1746 case INDEX_op_goto_ptr:
1747 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, args[0]);
1748 break;
1749
1750 OP_32_64(ld8u):
1751 /* ??? LLC (RXY format) is only present with the extended-immediate
1752 facility, whereas LLGC is always present. */
1753 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1754 break;
1755
1756 OP_32_64(ld8s):
1757 /* ??? LB is no smaller than LGB, so no point to using it. */
1758 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1759 break;
1760
1761 OP_32_64(ld16u):
1762 /* ??? LLH (RXY format) is only present with the extended-immediate
1763 facility, whereas LLGH is always present. */
1764 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1765 break;
1766
1767 case INDEX_op_ld16s_i32:
1768 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1769 break;
1770
1771 case INDEX_op_ld_i32:
1772 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1773 break;
1774
1775 OP_32_64(st8):
1776 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1777 TCG_REG_NONE, args[2]);
1778 break;
1779
1780 OP_32_64(st16):
1781 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1782 TCG_REG_NONE, args[2]);
1783 break;
1784
1785 case INDEX_op_st_i32:
1786 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1787 break;
1788
1789 case INDEX_op_add_i32:
0db921e6 1790 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1791 if (const_args[2]) {
0db921e6
RH
1792 do_addi_32:
1793 if (a0 == a1) {
1794 if (a2 == (int16_t)a2) {
1795 tcg_out_insn(s, RI, AHI, a0, a2);
1796 break;
1797 }
b2c98d9d 1798 if (s390_facilities & FACILITY_EXT_IMM) {
0db921e6
RH
1799 tcg_out_insn(s, RIL, AFI, a0, a2);
1800 break;
1801 }
1802 }
1803 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1804 } else if (a0 == a1) {
1805 tcg_out_insn(s, RR, AR, a0, a2);
48bb3750 1806 } else {
0db921e6 1807 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
1808 }
1809 break;
1810 case INDEX_op_sub_i32:
0db921e6 1811 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1812 if (const_args[2]) {
0db921e6
RH
1813 a2 = -a2;
1814 goto do_addi_32;
c2097136
RH
1815 } else if (a0 == a1) {
1816 tcg_out_insn(s, RR, SR, a0, a2);
1817 } else {
1818 tcg_out_insn(s, RRF, SRK, a0, a1, a2);
48bb3750
RH
1819 }
1820 break;
1821
1822 case INDEX_op_and_i32:
c2097136 1823 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1824 if (const_args[2]) {
c2097136
RH
1825 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1826 tgen_andi(s, TCG_TYPE_I32, a0, a2);
1827 } else if (a0 == a1) {
1828 tcg_out_insn(s, RR, NR, a0, a2);
48bb3750 1829 } else {
c2097136 1830 tcg_out_insn(s, RRF, NRK, a0, a1, a2);
48bb3750
RH
1831 }
1832 break;
1833 case INDEX_op_or_i32:
c2097136 1834 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1835 if (const_args[2]) {
c2097136
RH
1836 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1837 tgen64_ori(s, a0, a2);
1838 } else if (a0 == a1) {
1839 tcg_out_insn(s, RR, OR, a0, a2);
48bb3750 1840 } else {
c2097136 1841 tcg_out_insn(s, RRF, ORK, a0, a1, a2);
48bb3750
RH
1842 }
1843 break;
1844 case INDEX_op_xor_i32:
c2097136 1845 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1846 if (const_args[2]) {
c2097136
RH
1847 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1848 tgen64_xori(s, a0, a2);
1849 } else if (a0 == a1) {
48bb3750 1850 tcg_out_insn(s, RR, XR, args[0], args[2]);
c2097136
RH
1851 } else {
1852 tcg_out_insn(s, RRF, XRK, a0, a1, a2);
48bb3750
RH
1853 }
1854 break;
1855
1856 case INDEX_op_neg_i32:
1857 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1858 break;
1859
1860 case INDEX_op_mul_i32:
1861 if (const_args[2]) {
1862 if ((int32_t)args[2] == (int16_t)args[2]) {
1863 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1864 } else {
1865 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1866 }
1867 } else {
1868 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1869 }
1870 break;
1871
1872 case INDEX_op_div2_i32:
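        /* DR divides the 64-bit dividend held in the even/odd register pair
           R2:R3 by args[4], leaving the remainder in R2 and the quotient in
           R3; the "a"/"b" register constraints pin the outputs there.  */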
1873 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1874 break;
1875 case INDEX_op_divu2_i32:
1876 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1877 break;
1878
1879 case INDEX_op_shl_i32:
1880 op = RS_SLL;
c2097136 1881 op2 = RSY_SLLK;
48bb3750 1882 do_shift32:
c2097136
RH
1883 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1884 if (a0 == a1) {
1885 if (const_args[2]) {
1886 tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
1887 } else {
1888 tcg_out_sh32(s, op, a0, a2, 0);
1889 }
48bb3750 1890 } else {
c2097136
RH
1891 /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
1892 if (const_args[2]) {
1893 tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
1894 } else {
1895 tcg_out_sh64(s, op2, a0, a1, a2, 0);
1896 }
48bb3750
RH
1897 }
1898 break;
1899 case INDEX_op_shr_i32:
1900 op = RS_SRL;
c2097136 1901 op2 = RSY_SRLK;
48bb3750
RH
1902 goto do_shift32;
1903 case INDEX_op_sar_i32:
1904 op = RS_SRA;
c2097136 1905 op2 = RSY_SRAK;
48bb3750
RH
1906 goto do_shift32;
1907
1908 case INDEX_op_rotl_i32:
1909 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1910 if (const_args[2]) {
1911 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1912 } else {
1913 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1914 }
1915 break;
1916 case INDEX_op_rotr_i32:
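        /* There is no rotate-right instruction.  A rotate right by a
           constant N is a rotate left by (32 - N) & 31; for a register
           count, rotate left by the negated count instead.  */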
1917 if (const_args[2]) {
1918 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1919 TCG_REG_NONE, (32 - args[2]) & 31);
1920 } else {
1921 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1922 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1923 }
1924 break;
1925
1926 case INDEX_op_ext8s_i32:
1927 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1928 break;
1929 case INDEX_op_ext16s_i32:
1930 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1931 break;
1932 case INDEX_op_ext8u_i32:
1933 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1934 break;
1935 case INDEX_op_ext16u_i32:
1936 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1937 break;
1938
1939 OP_32_64(bswap16):
1940 /* The TCG bswap definition requires bits 0-47 already be zero.
1941 Thus we don't need the G-type insns to implement bswap16_i64. */
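        /* LRVR byte-reverses the low 32 bits, leaving the swapped halfword
           in the upper half of that word; the 16-bit logical shift right
           brings it back down into the low 16 bits.  */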
1942 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1943 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1944 break;
1945 OP_32_64(bswap32):
1946 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1947 break;
1948
3790b918 1949 case INDEX_op_add2_i32:
ad19b358
RH
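        /* Add the low halves first, producing the carry in the condition
           code, then add the high halves with ADD LOGICAL WITH CARRY.  */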
1950 if (const_args[4]) {
1951 tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
1952 } else {
1953 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1954 }
3790b918
RH
1955 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1956 break;
1957 case INDEX_op_sub2_i32:
ad19b358
RH
1958 if (const_args[4]) {
1959 tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
1960 } else {
1961 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1962 }
3790b918
RH
1963 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1964 break;
1965
48bb3750 1966 case INDEX_op_br:
bec16311 1967 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
48bb3750
RH
1968 break;
1969
1970 case INDEX_op_brcond_i32:
1971 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
bec16311 1972 args[1], const_args[1], arg_label(args[3]));
48bb3750
RH
1973 break;
1974 case INDEX_op_setcond_i32:
1975 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1976 args[2], const_args[2]);
1977 break;
96a9f093
RH
1978 case INDEX_op_movcond_i32:
1979 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
7af525af 1980 args[2], const_args[2], args[3], const_args[3]);
96a9f093 1981 break;
48bb3750 1982
f24efee4 1983 case INDEX_op_qemu_ld_i32:
48bb3750 1984 /* ??? Technically we can use a non-extending instruction. */
f24efee4 1985 case INDEX_op_qemu_ld_i64:
59227d5d 1986 tcg_out_qemu_ld(s, args[0], args[1], args[2]);
48bb3750 1987 break;
f24efee4
RH
1988 case INDEX_op_qemu_st_i32:
1989 case INDEX_op_qemu_st_i64:
59227d5d 1990 tcg_out_qemu_st(s, args[0], args[1], args[2]);
48bb3750
RH
1991 break;
1992
48bb3750
RH
1993 case INDEX_op_ld16s_i64:
1994 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1995 break;
1996 case INDEX_op_ld32u_i64:
1997 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1998 break;
1999 case INDEX_op_ld32s_i64:
2000 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
2001 break;
2002 case INDEX_op_ld_i64:
2003 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2004 break;
2005
2006 case INDEX_op_st32_i64:
2007 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2008 break;
2009 case INDEX_op_st_i64:
2010 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2011 break;
2012
2013 case INDEX_op_add_i64:
0db921e6 2014 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2015 if (const_args[2]) {
0db921e6
RH
2016 do_addi_64:
2017 if (a0 == a1) {
2018 if (a2 == (int16_t)a2) {
2019 tcg_out_insn(s, RI, AGHI, a0, a2);
2020 break;
2021 }
b2c98d9d 2022 if (s390_facilities & FACILITY_EXT_IMM) {
0db921e6
RH
2023 if (a2 == (int32_t)a2) {
2024 tcg_out_insn(s, RIL, AGFI, a0, a2);
2025 break;
2026 } else if (a2 == (uint32_t)a2) {
2027 tcg_out_insn(s, RIL, ALGFI, a0, a2);
2028 break;
2029 } else if (-a2 == (uint32_t)-a2) {
2030 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
2031 break;
2032 }
2033 }
2034 }
2035 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2036 } else if (a0 == a1) {
2037 tcg_out_insn(s, RRE, AGR, a0, a2);
48bb3750 2038 } else {
0db921e6 2039 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
2040 }
2041 break;
2042 case INDEX_op_sub_i64:
0db921e6 2043 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2044 if (const_args[2]) {
0db921e6
RH
2045 a2 = -a2;
2046 goto do_addi_64;
c2097136
RH
2047 } else if (a0 == a1) {
2048 tcg_out_insn(s, RRE, SGR, a0, a2);
48bb3750 2049 } else {
c2097136 2050 tcg_out_insn(s, RRF, SGRK, a0, a1, a2);
48bb3750
RH
2051 }
2052 break;
2053
2054 case INDEX_op_and_i64:
c2097136 2055 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2056 if (const_args[2]) {
c2097136 2057 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
07ff7983 2058 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
c2097136 2059 } else if (a0 == a1) {
48bb3750 2060 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
c2097136
RH
2061 } else {
2062 tcg_out_insn(s, RRF, NGRK, a0, a1, a2);
48bb3750
RH
2063 }
2064 break;
2065 case INDEX_op_or_i64:
c2097136 2066 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2067 if (const_args[2]) {
c2097136
RH
2068 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2069 tgen64_ori(s, a0, a2);
2070 } else if (a0 == a1) {
2071 tcg_out_insn(s, RRE, OGR, a0, a2);
48bb3750 2072 } else {
c2097136 2073 tcg_out_insn(s, RRF, OGRK, a0, a1, a2);
48bb3750
RH
2074 }
2075 break;
2076 case INDEX_op_xor_i64:
c2097136 2077 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2078 if (const_args[2]) {
c2097136
RH
2079 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2080 tgen64_xori(s, a0, a2);
2081 } else if (a0 == a1) {
2082 tcg_out_insn(s, RRE, XGR, a0, a2);
48bb3750 2083 } else {
c2097136 2084 tcg_out_insn(s, RRF, XGRK, a0, a1, a2);
48bb3750
RH
2085 }
2086 break;
2087
2088 case INDEX_op_neg_i64:
2089 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2090 break;
2091 case INDEX_op_bswap64_i64:
2092 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2093 break;
2094
2095 case INDEX_op_mul_i64:
2096 if (const_args[2]) {
2097 if (args[2] == (int16_t)args[2]) {
2098 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
2099 } else {
2100 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
2101 }
2102 } else {
2103 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
2104 }
2105 break;
2106
2107 case INDEX_op_div2_i64:
2108 /* ??? We get an unnecessary sign-extension of the dividend
2109 into R3 with this definition, but as we do in fact always
2110           produce both quotient and remainder, using INDEX_op_div_i64
2111           instead would require jumping through even more hoops.  */
2112 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
2113 break;
2114 case INDEX_op_divu2_i64:
2115 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
2116 break;
36017dc6
RH
2117 case INDEX_op_mulu2_i64:
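        /* MLGR multiplies the 64-bit value in R3 (the odd half of the R2:R3
           pair) by args[3], leaving the 128-bit product with its high part
           in R2 and its low part in R3.  */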
2118 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
2119 break;
48bb3750
RH
2120
2121 case INDEX_op_shl_i64:
2122 op = RSY_SLLG;
2123 do_shift64:
2124 if (const_args[2]) {
2125 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2126 } else {
2127 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2128 }
2129 break;
2130 case INDEX_op_shr_i64:
2131 op = RSY_SRLG;
2132 goto do_shift64;
2133 case INDEX_op_sar_i64:
2134 op = RSY_SRAG;
2135 goto do_shift64;
2136
2137 case INDEX_op_rotl_i64:
2138 if (const_args[2]) {
2139 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2140 TCG_REG_NONE, args[2]);
2141 } else {
2142 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2143 }
2144 break;
2145 case INDEX_op_rotr_i64:
2146 if (const_args[2]) {
2147 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2148 TCG_REG_NONE, (64 - args[2]) & 63);
2149 } else {
2150 /* We can use the smaller 32-bit negate because only the
2151 low 6 bits are examined for the rotate. */
2152 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2153 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2154 }
2155 break;
2156
2157 case INDEX_op_ext8s_i64:
2158 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2159 break;
2160 case INDEX_op_ext16s_i64:
2161 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2162 break;
4f2331e5 2163 case INDEX_op_ext_i32_i64:
48bb3750
RH
2164 case INDEX_op_ext32s_i64:
2165 tgen_ext32s(s, args[0], args[1]);
2166 break;
2167 case INDEX_op_ext8u_i64:
2168 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2169 break;
2170 case INDEX_op_ext16u_i64:
2171 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2172 break;
4f2331e5 2173 case INDEX_op_extu_i32_i64:
48bb3750
RH
2174 case INDEX_op_ext32u_i64:
2175 tgen_ext32u(s, args[0], args[1]);
2176 break;
2177
3790b918 2178 case INDEX_op_add2_i64:
ad19b358
RH
2179 if (const_args[4]) {
2180 if ((int64_t)args[4] >= 0) {
2181 tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2182 } else {
2183 tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2184 }
2185 } else {
2186 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2187 }
3790b918
RH
2188 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2189 break;
2190 case INDEX_op_sub2_i64:
ad19b358
RH
2191 if (const_args[4]) {
2192 if ((int64_t)args[4] >= 0) {
2193 tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2194 } else {
2195 tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2196 }
2197 } else {
2198 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2199 }
3790b918
RH
2200 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2201 break;
2202
48bb3750
RH
2203 case INDEX_op_brcond_i64:
2204 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
bec16311 2205 args[1], const_args[1], arg_label(args[3]));
48bb3750
RH
2206 break;
2207 case INDEX_op_setcond_i64:
2208 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2209 args[2], const_args[2]);
2210 break;
96a9f093
RH
2211 case INDEX_op_movcond_i64:
2212 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
7af525af 2213 args[2], const_args[2], args[3], const_args[3]);
96a9f093 2214 break;
48bb3750 2215
d5690ea4 2216 OP_32_64(deposit):
752b1be9
RH
2217 a0 = args[0], a1 = args[1], a2 = args[2];
2218 if (const_args[1]) {
2219 tgen_deposit(s, a0, a2, args[3], args[4], 1);
2220 } else {
2221 /* Since we can't support "0Z" as a constraint, we allow a1 in
2222            any register.  Fix things up as if it were a matching constraint.  */
2223 if (a0 != a1) {
2224 TCGType type = (opc == INDEX_op_deposit_i64);
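                /* TCG_TYPE_I32 is 0 and TCG_TYPE_I64 is 1, so the comparison
                   yields the correct TCGType directly.  */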
2225 if (a0 == a2) {
2226 tcg_out_mov(s, type, TCG_TMP0, a2);
2227 a2 = TCG_TMP0;
2228 }
2229 tcg_out_mov(s, type, a0, a1);
2230 }
2231 tgen_deposit(s, a0, a2, args[3], args[4], 0);
2232 }
d5690ea4 2233 break;
752b1be9 2234
b0bf5fe8
RH
2235 OP_32_64(extract):
2236 tgen_extract(s, args[0], args[1], args[2], args[3]);
2237 break;
d5690ea4 2238
ce411066
RH
2239 case INDEX_op_clz_i64:
2240 tgen_clz(s, args[0], args[1], args[2], const_args[2]);
2241 break;
2242
c9314d61
PK
2243 case INDEX_op_mb:
2244         /* The host memory model is quite strong; we simply need to
2245 serialize the instruction stream. */
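        /* BCR with R0 as the target does not branch; with mask 15 it acts
           as a serialization point, and mask 14 is the cheaper form provided
           by the fast-BCR-serialization facility.  */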
2246 if (args[0] & TCG_MO_ST_LD) {
2247 tcg_out_insn(s, RR, BCR,
b2c98d9d 2248 s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0);
c9314d61
PK
2249 }
2250 break;
2251
96d0ee7f
RH
2252 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2253 case INDEX_op_mov_i64:
2254 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2255 case INDEX_op_movi_i64:
2256 case INDEX_op_call: /* Always emitted via tcg_out_call. */
48bb3750 2257 default:
48bb3750
RH
2258 tcg_abort();
2259 }
2827822e
AG
2260}
2261
f69d277e
RH
2262static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
2263{
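    /* Constraint letters used below: "r" is any general register, "L" is
       the qemu_ld/st register class, "a" and "b" pin an operand to R2 and
       R3 respectively, "0"/"1" match a previous operand, and the capital
       letters select the TCG_CT_CONST_* immediate ranges.  */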
9b5500b6
RH
2264 static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
2265 static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
2266 static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
2267 static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
07952d95 2268 static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
9b5500b6 2269 static const TCGTargetOpDef r_rC = { .args_ct_str = { "r", "rC" } };
07952d95 2270 static const TCGTargetOpDef r_rZ = { .args_ct_str = { "r", "rZ" } };
9b5500b6 2271 static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
c2097136 2272 static const TCGTargetOpDef r_r_rM = { .args_ct_str = { "r", "r", "rM" } };
e42349cb 2273 static const TCGTargetOpDef r_0_r = { .args_ct_str = { "r", "0", "r" } };
9b5500b6 2274 static const TCGTargetOpDef r_0_ri = { .args_ct_str = { "r", "0", "ri" } };
a8f0269e
RH
2275 static const TCGTargetOpDef r_0_rI = { .args_ct_str = { "r", "0", "rI" } };
2276 static const TCGTargetOpDef r_0_rJ = { .args_ct_str = { "r", "0", "rJ" } };
e42349cb
RH
2277 static const TCGTargetOpDef r_0_rN = { .args_ct_str = { "r", "0", "rN" } };
2278 static const TCGTargetOpDef r_0_rM = { .args_ct_str = { "r", "0", "rM" } };
ba18b07d
RH
2279 static const TCGTargetOpDef a2_r
2280 = { .args_ct_str = { "r", "r", "0", "1", "r", "r" } };
2281 static const TCGTargetOpDef a2_ri
2282 = { .args_ct_str = { "r", "r", "0", "1", "ri", "r" } };
2283 static const TCGTargetOpDef a2_rA
2284 = { .args_ct_str = { "r", "r", "0", "1", "rA", "r" } };
9b5500b6
RH
2285
2286 switch (op) {
2287 case INDEX_op_goto_ptr:
2288 return &r;
2289
2290 case INDEX_op_ld8u_i32:
2291 case INDEX_op_ld8u_i64:
2292 case INDEX_op_ld8s_i32:
2293 case INDEX_op_ld8s_i64:
2294 case INDEX_op_ld16u_i32:
2295 case INDEX_op_ld16u_i64:
2296 case INDEX_op_ld16s_i32:
2297 case INDEX_op_ld16s_i64:
2298 case INDEX_op_ld_i32:
2299 case INDEX_op_ld32u_i64:
2300 case INDEX_op_ld32s_i64:
2301 case INDEX_op_ld_i64:
2302 case INDEX_op_st8_i32:
2303 case INDEX_op_st8_i64:
2304 case INDEX_op_st16_i32:
2305 case INDEX_op_st16_i64:
2306 case INDEX_op_st_i32:
2307 case INDEX_op_st32_i64:
2308 case INDEX_op_st_i64:
2309 return &r_r;
2310
2311 case INDEX_op_add_i32:
2312 case INDEX_op_add_i64:
2313 return &r_r_ri;
2314 case INDEX_op_sub_i32:
2315 case INDEX_op_sub_i64:
c2097136 2316 return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
a8f0269e 2317
9b5500b6 2318 case INDEX_op_mul_i32:
a8f0269e
RH
2319 /* If we have the general-instruction-extensions, then we have
2320            MULTIPLY SINGLE IMMEDIATE with a signed 32-bit immediate; otherwise
2321            we have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit immediate.  */
2322 return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_ri : &r_0_rI);
9b5500b6 2323 case INDEX_op_mul_i64:
a8f0269e
RH
2324 return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_rJ : &r_0_rI);
2325
9b5500b6 2326 case INDEX_op_or_i32:
e42349cb
RH
2327         /* The use of [iNM] constraints is an optimization only, since a full
2328 64-bit immediate OR can always be performed with 4 sequential
2329 OI[LH][LH] instructions. By rejecting certain negative ranges,
2330 the immediate load plus the reg-reg OR is smaller. */
2331 return (s390_facilities & FACILITY_EXT_IMM
c2097136 2332 ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri)
e42349cb 2333 : &r_0_rN);
9b5500b6 2334 case INDEX_op_or_i64:
e42349cb 2335 return (s390_facilities & FACILITY_EXT_IMM
c2097136 2336 ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_rM : &r_0_rM)
e42349cb
RH
2337 : &r_0_rN);
2338
9b5500b6 2339 case INDEX_op_xor_i32:
e42349cb
RH
2340 /* Without EXT_IMM, no immediates are supported. Otherwise,
2341 rejecting certain negative ranges leads to smaller code. */
2342 return (s390_facilities & FACILITY_EXT_IMM
c2097136 2343 ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri)
e42349cb 2344 : &r_0_r);
9b5500b6 2345 case INDEX_op_xor_i64:
e42349cb 2346 return (s390_facilities & FACILITY_EXT_IMM
c2097136 2347 ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_rM : &r_0_rM)
e42349cb
RH
2348 : &r_0_r);
2349
9b5500b6
RH
2350 case INDEX_op_and_i32:
2351 case INDEX_op_and_i64:
c2097136 2352 return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
9b5500b6
RH
2353
2354 case INDEX_op_shl_i32:
2355 case INDEX_op_shr_i32:
2356 case INDEX_op_sar_i32:
c2097136 2357 return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
9b5500b6
RH
2358
2359 case INDEX_op_shl_i64:
2360 case INDEX_op_shr_i64:
2361 case INDEX_op_sar_i64:
2362 return &r_r_ri;
2363
2364 case INDEX_op_rotl_i32:
2365 case INDEX_op_rotl_i64:
2366 case INDEX_op_rotr_i32:
2367 case INDEX_op_rotr_i64:
2368 return &r_r_ri;
2369
2370 case INDEX_op_brcond_i32:
07952d95
RH
2371 /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
2372 return (s390_facilities & FACILITY_EXT_IMM ? &r_ri : &r_rZ);
9b5500b6 2373 case INDEX_op_brcond_i64:
07952d95 2374 return (s390_facilities & FACILITY_EXT_IMM ? &r_rC : &r_rZ);
9b5500b6
RH
2375
2376 case INDEX_op_bswap16_i32:
2377 case INDEX_op_bswap16_i64:
2378 case INDEX_op_bswap32_i32:
2379 case INDEX_op_bswap32_i64:
2380 case INDEX_op_bswap64_i64:
2381 case INDEX_op_neg_i32:
2382 case INDEX_op_neg_i64:
2383 case INDEX_op_ext8s_i32:
2384 case INDEX_op_ext8s_i64:
2385 case INDEX_op_ext8u_i32:
2386 case INDEX_op_ext8u_i64:
2387 case INDEX_op_ext16s_i32:
2388 case INDEX_op_ext16s_i64:
2389 case INDEX_op_ext16u_i32:
2390 case INDEX_op_ext16u_i64:
2391 case INDEX_op_ext32s_i64:
2392 case INDEX_op_ext32u_i64:
2393 case INDEX_op_ext_i32_i64:
2394 case INDEX_op_extu_i32_i64:
2395 case INDEX_op_extract_i32:
2396 case INDEX_op_extract_i64:
2397 return &r_r;
2398
2399 case INDEX_op_clz_i64:
2400 return &r_r_ri;
2401
2402 case INDEX_op_qemu_ld_i32:
2403 case INDEX_op_qemu_ld_i64:
2404 return &r_L;
2405 case INDEX_op_qemu_st_i64:
2406 case INDEX_op_qemu_st_i32:
2407 return &L_L;
f69d277e 2408
9b5500b6
RH
2409 case INDEX_op_deposit_i32:
2410 case INDEX_op_deposit_i64:
2411 {
2412 static const TCGTargetOpDef dep
2413 = { .args_ct_str = { "r", "rZ", "r" } };
2414 return &dep;
f69d277e 2415 }
9b5500b6
RH
2416 case INDEX_op_setcond_i32:
2417 case INDEX_op_setcond_i64:
2418 {
07952d95
RH
2419 /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
2420 static const TCGTargetOpDef setc_z
2421 = { .args_ct_str = { "r", "r", "rZ" } };
2422 static const TCGTargetOpDef setc_c
9b5500b6 2423 = { .args_ct_str = { "r", "r", "rC" } };
07952d95 2424 return (s390_facilities & FACILITY_EXT_IMM ? &setc_c : &setc_z);
9b5500b6
RH
2425 }
2426 case INDEX_op_movcond_i32:
2427 case INDEX_op_movcond_i64:
2428 {
07952d95
RH
2429 /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
2430 static const TCGTargetOpDef movc_z
2431 = { .args_ct_str = { "r", "r", "rZ", "r", "0" } };
2432 static const TCGTargetOpDef movc_c
9b5500b6 2433 = { .args_ct_str = { "r", "r", "rC", "r", "0" } };
7af525af
RH
2434 static const TCGTargetOpDef movc_l
2435 = { .args_ct_str = { "r", "r", "rC", "rI", "0" } };
2436 return (s390_facilities & FACILITY_EXT_IMM
2437 ? (s390_facilities & FACILITY_LOAD_ON_COND2
2438 ? &movc_l : &movc_c)
2439 : &movc_z);
9b5500b6
RH
2440 }
2441 case INDEX_op_div2_i32:
2442 case INDEX_op_div2_i64:
2443 case INDEX_op_divu2_i32:
2444 case INDEX_op_divu2_i64:
2445 {
2446 static const TCGTargetOpDef div2
2447 = { .args_ct_str = { "b", "a", "0", "1", "r" } };
2448 return &div2;
2449 }
2450 case INDEX_op_mulu2_i64:
2451 {
2452 static const TCGTargetOpDef mul2
2453 = { .args_ct_str = { "b", "a", "0", "r" } };
2454 return &mul2;
2455 }
ba18b07d 2456
9b5500b6 2457 case INDEX_op_add2_i32:
9b5500b6 2458 case INDEX_op_sub2_i32:
ba18b07d
RH
2459 return (s390_facilities & FACILITY_EXT_IMM ? &a2_ri : &a2_r);
2460 case INDEX_op_add2_i64:
9b5500b6 2461 case INDEX_op_sub2_i64:
ba18b07d 2462 return (s390_facilities & FACILITY_EXT_IMM ? &a2_rA : &a2_r);
9b5500b6
RH
2463
2464 default:
2465 break;
f69d277e
RH
2466 }
2467 return NULL;
2468}
2469
b2c98d9d 2470static void query_s390_facilities(void)
48bb3750 2471{
c9baa30f 2472 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
48bb3750 2473
c9baa30f
RH
2474 /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
2475 is present on all 64-bit systems, but let's check for it anyway. */
2476 if (hwcap & HWCAP_S390_STFLE) {
2477 register int r0 __asm__("0");
2478 register void *r1 __asm__("1");
48bb3750 2479
c9baa30f 2480 /* stfle 0(%r1) */
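        /* With r0 = 0 the program requests a single doubleword of facility
           bits, which STFLE stores at the address in r1 (&s390_facilities).  */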
b2c98d9d 2481 r1 = &s390_facilities;
c9baa30f
RH
2482 asm volatile(".word 0xb2b0,0x1000"
2483 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
48bb3750
RH
2484 }
2485}
2486
2487static void tcg_target_init(TCGContext *s)
2827822e 2488{
b2c98d9d 2489 query_s390_facilities();
48bb3750
RH
2490
2491 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2492 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2493
2494 tcg_regset_clear(tcg_target_call_clobber_regs);
2495 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2496 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2497 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2498 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2499 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2500 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
f24efee4
RH
2501 /* The r6 register is technically call-saved, but it's also a parameter
2502 register, so it can get killed by setup for the qemu_st helper. */
2503 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
48bb3750
RH
2504 /* The return register can be considered call-clobbered. */
2505 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2506
2507 tcg_regset_clear(s->reserved_regs);
2508 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2509     /* XXX many insns can't be used with R0, so we'd better avoid it for now */
2510 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2511 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2827822e
AG
2512}
2513
f167dc37
RH
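/* The frame covers the ABI-reserved area at the bottom of the stack
   (TCG_TARGET_CALL_STACK_OFFSET), space for outgoing helper-call arguments
   (TCG_STATIC_CALL_ARGS_SIZE), and TCG's temporary buffer.  */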
2514#define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \
2515 + TCG_STATIC_CALL_ARGS_SIZE \
2516 + CPU_TEMP_BUF_NLONGS * sizeof(long)))
2517
48bb3750 2518static void tcg_target_qemu_prologue(TCGContext *s)
2827822e 2519{
48bb3750
RH
2520 /* stmg %r6,%r15,48(%r15) (save registers) */
2521 tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
2522
a4924e8b 2523 /* aghi %r15,-frame_size */
f167dc37 2524 tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);
a4924e8b
RH
2525
2526 tcg_set_frame(s, TCG_REG_CALL_STACK,
2527 TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
2528 CPU_TEMP_BUF_NLONGS * sizeof(long));
48bb3750 2529
090d0bfd 2530#ifndef CONFIG_SOFTMMU
b76f21a7
LV
2531 if (guest_base >= 0x80000) {
2532 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
48bb3750
RH
2533 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2534 }
090d0bfd 2535#endif
48bb3750 2536
cea5f9a2
BS
2537 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2538 /* br %r3 (go to TB) */
2539 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
48bb3750 2540
46644483
RH
2541 /*
2542 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
2543 * and fall through to the rest of the epilogue.
2544 */
2545 s->code_gen_epilogue = s->code_ptr;
2546 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);
2547
2548 /* TB epilogue */
48bb3750
RH
2549 tb_ret_addr = s->code_ptr;
2550
a4924e8b
RH
2551 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2552 tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
f167dc37 2553 FRAME_SIZE + 48);
48bb3750
RH
2554
2555 /* br %r14 (return) */
2556 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
2827822e 2557}
f167dc37
RH
2558
2559typedef struct {
d2e16f2c 2560 DebugFrameHeader h;
f167dc37
RH
2561 uint8_t fde_def_cfa[4];
2562 uint8_t fde_reg_ofs[18];
2563} DebugFrame;
2564
2565/* We're expecting a 2-byte uleb128-encoded value.  */
2566QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
2567
2568#define ELF_HOST_MACHINE EM_S390
2569
d2e16f2c
RH
2570static const DebugFrame debug_frame = {
2571 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2572 .h.cie.id = -1,
2573 .h.cie.version = 1,
2574 .h.cie.code_align = 1,
2575 .h.cie.data_align = 8, /* sleb128 8 */
2576 .h.cie.return_column = TCG_REG_R14,
f167dc37
RH
2577
2578 /* Total FDE size does not include the "len" member. */
d2e16f2c 2579 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
f167dc37
RH
2580
2581 .fde_def_cfa = {
2582 12, TCG_REG_CALL_STACK, /* DW_CFA_def_cfa %r15, ... */
2583 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2584 (FRAME_SIZE >> 7)
2585 },
2586 .fde_reg_ofs = {
2587 0x86, 6, /* DW_CFA_offset, %r6, 48 */
2588 0x87, 7, /* DW_CFA_offset, %r7, 56 */
2589 0x88, 8, /* DW_CFA_offset, %r8, 64 */
2590         0x89, 9, /* DW_CFA_offset, %r9, 72 */
2591 0x8a, 10, /* DW_CFA_offset, %r10, 80 */
2592 0x8b, 11, /* DW_CFA_offset, %r11, 88 */
2593 0x8c, 12, /* DW_CFA_offset, %r12, 96 */
2594 0x8d, 13, /* DW_CFA_offset, %r13, 104 */
2595 0x8e, 14, /* DW_CFA_offset, %r14, 112 */
2596 }
2597};
2598
2599void tcg_register_jit(void *buf, size_t buf_size)
2600{
f167dc37
RH
2601 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2602}