]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/s390/tcg-target.inc.c
translate-all: Adjust 256mb testing for mips64
[mirror_qemu.git] / tcg / s390 / tcg-target.inc.c
CommitLineData
2827822e
AG
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
48bb3750
RH
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
2827822e
AG
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
fb596415 27#include "tcg-be-ldst.h"
3cf246f0 28
a01fc30d
RH
29/* We only support generating code for 64-bit mode. */
30#if TCG_TARGET_REG_BITS != 64
31#error "unsupported code generation mode"
32#endif
33
c9baa30f
RH
34#include "elf.h"
35
48bb3750
RH
36/* ??? The translation blocks produced by TCG are generally small enough to
37 be entirely reachable with a 16-bit displacement. Leaving the option for
38 a 32-bit displacement here Just In Case. */
39#define USE_LONG_BRANCHES 0
40
671c835b
RH
41#define TCG_CT_CONST_MULI 0x100
42#define TCG_CT_CONST_ORI 0x200
43#define TCG_CT_CONST_XORI 0x400
44#define TCG_CT_CONST_CMPI 0x800
ad19b358 45#define TCG_CT_CONST_ADLI 0x1000
48bb3750
RH
46
47/* Several places within the instruction set 0 means "no register"
48 rather than TCG_REG_R0. */
49#define TCG_REG_NONE 0
50
51/* A scratch register that may be be used throughout the backend. */
52#define TCG_TMP0 TCG_REG_R14
53
4cbea598 54#ifndef CONFIG_SOFTMMU
48bb3750 55#define TCG_GUEST_BASE_REG TCG_REG_R13
48bb3750
RH
56#endif
57
48bb3750
RH
58/* All of the following instructions are prefixed with their instruction
59 format, and are defined as 8- or 16-bit quantities, even when the two
60 halves of the 16-bit quantity may appear 32 bits apart in the insn.
61 This makes it easy to copy the values from the tables in Appendix B. */
62typedef enum S390Opcode {
63 RIL_AFI = 0xc209,
64 RIL_AGFI = 0xc208,
3790b918 65 RIL_ALFI = 0xc20b,
48bb3750
RH
66 RIL_ALGFI = 0xc20a,
67 RIL_BRASL = 0xc005,
68 RIL_BRCL = 0xc004,
69 RIL_CFI = 0xc20d,
70 RIL_CGFI = 0xc20c,
71 RIL_CLFI = 0xc20f,
72 RIL_CLGFI = 0xc20e,
73 RIL_IIHF = 0xc008,
74 RIL_IILF = 0xc009,
75 RIL_LARL = 0xc000,
76 RIL_LGFI = 0xc001,
77 RIL_LGRL = 0xc408,
78 RIL_LLIHF = 0xc00e,
79 RIL_LLILF = 0xc00f,
80 RIL_LRL = 0xc40d,
81 RIL_MSFI = 0xc201,
82 RIL_MSGFI = 0xc200,
83 RIL_NIHF = 0xc00a,
84 RIL_NILF = 0xc00b,
85 RIL_OIHF = 0xc00c,
86 RIL_OILF = 0xc00d,
3790b918 87 RIL_SLFI = 0xc205,
0db921e6 88 RIL_SLGFI = 0xc204,
48bb3750
RH
89 RIL_XIHF = 0xc006,
90 RIL_XILF = 0xc007,
91
92 RI_AGHI = 0xa70b,
93 RI_AHI = 0xa70a,
94 RI_BRC = 0xa704,
95 RI_IIHH = 0xa500,
96 RI_IIHL = 0xa501,
97 RI_IILH = 0xa502,
98 RI_IILL = 0xa503,
99 RI_LGHI = 0xa709,
100 RI_LLIHH = 0xa50c,
101 RI_LLIHL = 0xa50d,
102 RI_LLILH = 0xa50e,
103 RI_LLILL = 0xa50f,
104 RI_MGHI = 0xa70d,
105 RI_MHI = 0xa70c,
106 RI_NIHH = 0xa504,
107 RI_NIHL = 0xa505,
108 RI_NILH = 0xa506,
109 RI_NILL = 0xa507,
110 RI_OIHH = 0xa508,
111 RI_OIHL = 0xa509,
112 RI_OILH = 0xa50a,
113 RI_OILL = 0xa50b,
114
115 RIE_CGIJ = 0xec7c,
116 RIE_CGRJ = 0xec64,
117 RIE_CIJ = 0xec7e,
118 RIE_CLGRJ = 0xec65,
119 RIE_CLIJ = 0xec7f,
120 RIE_CLGIJ = 0xec7d,
121 RIE_CLRJ = 0xec77,
122 RIE_CRJ = 0xec76,
d5690ea4 123 RIE_RISBG = 0xec55,
48bb3750
RH
124
125 RRE_AGR = 0xb908,
3790b918
RH
126 RRE_ALGR = 0xb90a,
127 RRE_ALCR = 0xb998,
128 RRE_ALCGR = 0xb988,
48bb3750
RH
129 RRE_CGR = 0xb920,
130 RRE_CLGR = 0xb921,
131 RRE_DLGR = 0xb987,
132 RRE_DLR = 0xb997,
133 RRE_DSGFR = 0xb91d,
134 RRE_DSGR = 0xb90d,
135 RRE_LGBR = 0xb906,
136 RRE_LCGR = 0xb903,
137 RRE_LGFR = 0xb914,
138 RRE_LGHR = 0xb907,
139 RRE_LGR = 0xb904,
140 RRE_LLGCR = 0xb984,
141 RRE_LLGFR = 0xb916,
142 RRE_LLGHR = 0xb985,
143 RRE_LRVR = 0xb91f,
144 RRE_LRVGR = 0xb90f,
145 RRE_LTGR = 0xb902,
36017dc6 146 RRE_MLGR = 0xb986,
48bb3750
RH
147 RRE_MSGR = 0xb90c,
148 RRE_MSR = 0xb252,
149 RRE_NGR = 0xb980,
150 RRE_OGR = 0xb981,
151 RRE_SGR = 0xb909,
3790b918
RH
152 RRE_SLGR = 0xb90b,
153 RRE_SLBR = 0xb999,
154 RRE_SLBGR = 0xb989,
48bb3750
RH
155 RRE_XGR = 0xb982,
156
96a9f093
RH
157 RRF_LOCR = 0xb9f2,
158 RRF_LOCGR = 0xb9e2,
159
48bb3750 160 RR_AR = 0x1a,
3790b918 161 RR_ALR = 0x1e,
48bb3750
RH
162 RR_BASR = 0x0d,
163 RR_BCR = 0x07,
164 RR_CLR = 0x15,
165 RR_CR = 0x19,
166 RR_DR = 0x1d,
167 RR_LCR = 0x13,
168 RR_LR = 0x18,
169 RR_LTR = 0x12,
170 RR_NR = 0x14,
171 RR_OR = 0x16,
172 RR_SR = 0x1b,
3790b918 173 RR_SLR = 0x1f,
48bb3750
RH
174 RR_XR = 0x17,
175
176 RSY_RLL = 0xeb1d,
177 RSY_RLLG = 0xeb1c,
178 RSY_SLLG = 0xeb0d,
179 RSY_SRAG = 0xeb0a,
180 RSY_SRLG = 0xeb0c,
181
182 RS_SLL = 0x89,
183 RS_SRA = 0x8a,
184 RS_SRL = 0x88,
185
186 RXY_AG = 0xe308,
187 RXY_AY = 0xe35a,
188 RXY_CG = 0xe320,
189 RXY_CY = 0xe359,
0db921e6 190 RXY_LAY = 0xe371,
48bb3750
RH
191 RXY_LB = 0xe376,
192 RXY_LG = 0xe304,
193 RXY_LGB = 0xe377,
194 RXY_LGF = 0xe314,
195 RXY_LGH = 0xe315,
196 RXY_LHY = 0xe378,
197 RXY_LLGC = 0xe390,
198 RXY_LLGF = 0xe316,
199 RXY_LLGH = 0xe391,
200 RXY_LMG = 0xeb04,
201 RXY_LRV = 0xe31e,
202 RXY_LRVG = 0xe30f,
203 RXY_LRVH = 0xe31f,
204 RXY_LY = 0xe358,
205 RXY_STCY = 0xe372,
206 RXY_STG = 0xe324,
207 RXY_STHY = 0xe370,
208 RXY_STMG = 0xeb24,
209 RXY_STRV = 0xe33e,
210 RXY_STRVG = 0xe32f,
211 RXY_STRVH = 0xe33f,
212 RXY_STY = 0xe350,
213
214 RX_A = 0x5a,
215 RX_C = 0x59,
216 RX_L = 0x58,
0db921e6 217 RX_LA = 0x41,
48bb3750
RH
218 RX_LH = 0x48,
219 RX_ST = 0x50,
220 RX_STC = 0x42,
221 RX_STH = 0x40,
ed3d51ec
SF
222
223 NOP = 0x0707,
48bb3750
RH
224} S390Opcode;
225
#ifdef CONFIG_DEBUG_TCG
/* Register names, indexed by TCGReg, for debug output.
   BUG FIX: the original initializer was missing the commas between
   "%r10" ... "%r15", so C string-literal concatenation collapsed the
   last six names into the single entry "%r10%r11%r12%r13%r14%r15",
   leaving entries 11..15 NULL and misnaming %r10 in register dumps. */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
};
#endif
232
/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers. */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers. */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation. */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};
256
/* Integer argument registers for the s390 calling convention: the
   first five integer arguments arrive in r2..r6.  */
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};
264
/* Integer return-value register: results come back in r2.  */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};
268
269#define S390_CC_EQ 8
270#define S390_CC_LT 4
271#define S390_CC_GT 2
272#define S390_CC_OV 1
273#define S390_CC_NE (S390_CC_LT | S390_CC_GT)
274#define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
275#define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
276#define S390_CC_NEVER 0
277#define S390_CC_ALWAYS 15
278
/* Condition codes that result from a COMPARE and COMPARE LOGICAL.
   The signed and unsigned TCG conditions map to the same CC masks
   because the choice of C vs CL insn (made by the caller) already
   accounts for signedness.  */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};
292
/* Condition codes that result from a LOAD AND TEST.  Here, we have no
   unsigned instruction variation, however since the test is vs zero we
   can re-map the outcomes appropriately: no unsigned value is < 0,
   and every unsigned value is >= 0.  */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,   /* u < 0 is impossible */
    [TCG_COND_LEU] = S390_CC_EQ,      /* u <= 0 iff u == 0 */
    [TCG_COND_GTU] = S390_CC_NE,      /* u > 0 iff u != 0 */
    [TCG_COND_GEU] = S390_CC_ALWAYS,  /* u >= 0 is always true */
};
308
#ifdef CONFIG_SOFTMMU
/* Softmmu slow-path load helpers, indexed by MemOp value; entries for
   MemOp combinations without a helper remain NULL.  */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};
324
f24efee4
RH
/* Softmmu slow-path store helpers, indexed by MemOp value; stores have
   no signed variants, so fewer entries than qemu_ld_helpers.  */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
#endif
48bb3750 335
8c081b18 336static tcg_insn_unit *tb_ret_addr;
48bb3750
RH
337
338/* A list of relevant facilities used by this translator. Some of these
339 are required for proper operation, and these are checked at startup. */
340
341#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
342#define FACILITY_LONG_DISP (1ULL << (63 - 18))
343#define FACILITY_EXT_IMM (1ULL << (63 - 21))
344#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
96a9f093 345#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
48bb3750
RH
346
347static uint64_t facilities;
2827822e 348
/* Patch a pc-relative relocation at CODE_PTR to point to VALUE.
   s390 pc-relative displacements are counted in halfwords, which is
   what tcg_insn_unit pointer arithmetic yields here; the displacement
   is measured from the start of the insn, one unit before CODE_PTR.  */
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    /* code_ptr points at the displacement field, one halfword past the
       instruction start; back up one unit to get the pc-relative base.  */
    intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1);
    /* The only addend this backend emits is -2.  */
    tcg_debug_assert(addend == -2);

    switch (type) {
    case R_390_PC16DBL:
        /* 16-bit halfword-scaled displacement must be in range.  */
        tcg_debug_assert(pcrel2 == (int16_t)pcrel2);
        tcg_patch16(code_ptr, pcrel2);
        break;
    case R_390_PC32DBL:
        /* 32-bit halfword-scaled displacement must be in range.  */
        tcg_debug_assert(pcrel2 == (int32_t)pcrel2);
        tcg_patch32(code_ptr, pcrel2);
        break;
    default:
        /* No other relocation types are generated by this backend.  */
        tcg_abort();
        break;
    }
}
369
2827822e
AG
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r': /* all registers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'R': /* not R0 */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;
    case 'L': /* qemu_ld/st constraint */
        /* Exclude the registers the softmmu slow path clobbers for
           its helper-call arguments.  */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
        break;
    case 'a': /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b': /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'A': /* immediate usable with add2/sub2 */
        ct->ct |= TCG_CT_CONST_ADLI;
        break;
    case 'K': /* immediate usable with multiply */
        ct->ct |= TCG_CT_CONST_MULI;
        break;
    case 'O': /* immediate usable with OR */
        ct->ct |= TCG_CT_CONST_ORI;
        break;
    case 'X': /* immediate usable with XOR */
        ct->ct |= TCG_CT_CONST_XORI;
        break;
    case 'C': /* immediate usable with compare */
        ct->ct |= TCG_CT_CONST_CMPI;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
425
48bb3750
RH
426/* Immediates to be used with logical OR. This is an optimization only,
427 since a full 64-bit immediate OR can always be performed with 4 sequential
428 OI[LH][LH] instructions. What we're looking for is immediates that we
429 can load efficiently, and the immediate load plus the reg-reg OR is
430 smaller than the sequential OI's. */
431
671c835b 432static int tcg_match_ori(TCGType type, tcg_target_long val)
48bb3750
RH
433{
434 if (facilities & FACILITY_EXT_IMM) {
671c835b 435 if (type == TCG_TYPE_I32) {
48bb3750
RH
436 /* All 32-bit ORs can be performed with 1 48-bit insn. */
437 return 1;
438 }
439 }
440
441 /* Look for negative values. These are best to load with LGHI. */
442 if (val < 0) {
443 if (val == (int16_t)val) {
444 return 0;
445 }
446 if (facilities & FACILITY_EXT_IMM) {
447 if (val == (int32_t)val) {
448 return 0;
449 }
450 }
451 }
452
453 return 1;
454}
455
456/* Immediates to be used with logical XOR. This is almost, but not quite,
457 only an optimization. XOR with immediate is only supported with the
458 extended-immediate facility. That said, there are a few patterns for
459 which it is better to load the value into a register first. */
460
671c835b 461static int tcg_match_xori(TCGType type, tcg_target_long val)
48bb3750
RH
462{
463 if ((facilities & FACILITY_EXT_IMM) == 0) {
464 return 0;
465 }
466
671c835b 467 if (type == TCG_TYPE_I32) {
48bb3750
RH
468 /* All 32-bit XORs can be performed with 1 48-bit insn. */
469 return 1;
470 }
471
472 /* Look for negative values. These are best to load with LGHI. */
473 if (val < 0 && val == (int32_t)val) {
474 return 0;
475 }
476
477 return 1;
478}
479
/* Immediates to be used with comparisons.  */

static int tcg_match_cmpi(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* The COMPARE IMMEDIATE instruction is available. */
        if (type == TCG_TYPE_I32) {
            /* We have a 32-bit immediate and can compare against anything. */
            return 1;
        } else {
            /* ??? We have no insight here into whether the comparison is
               signed or unsigned.  The COMPARE IMMEDIATE insn uses a 32-bit
               signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
               a 32-bit unsigned immediate.  If we were to use the (semi)
               obvious "val == (int32_t)val" we would be enabling unsigned
               comparisons vs very large numbers.  The only solution is to
               take the intersection of the ranges. */
            /* ??? Another possible solution is to simply lie and allow all
               constants here and force the out-of-range values into a temp
               register in tgen_cmp when we have knowledge of the actual
               comparison code in use. */
            return val >= 0 && val <= 0x7fffffff;
        }
    } else {
        /* Only the LOAD AND TEST instruction is available. */
        return val == 0;
    }
}
508
ad19b358
RH
509/* Immediates to be used with add2/sub2. */
510
511static int tcg_match_add2i(TCGType type, tcg_target_long val)
512{
513 if (facilities & FACILITY_EXT_IMM) {
514 if (type == TCG_TYPE_I32) {
515 return 1;
516 } else if (val >= -0xffffffffll && val <= 0xffffffffll) {
517 return 1;
518 }
519 }
520 return 0;
521}
522
/* Test if a constant matches the constraint. */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        /* Any constant is acceptable. */
        return 1;
    }

    /* For 32-bit operations only the low 32 bits are significant;
       normalize by sign extension. */
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive. */
    if (ct & TCG_CT_CONST_MULI) {
        /* Immediates that may be used with multiply.  If we have the
           general-instruction-extensions, then we have MULTIPLY SINGLE
           IMMEDIATE with a signed 32-bit, otherwise we have only
           MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
        if (facilities & FACILITY_GEN_INST_EXT) {
            return val == (int32_t)val;
        } else {
            return val == (int16_t)val;
        }
    } else if (ct & TCG_CT_CONST_ADLI) {
        return tcg_match_add2i(type, val);
    } else if (ct & TCG_CT_CONST_ORI) {
        return tcg_match_ori(type, val);
    } else if (ct & TCG_CT_CONST_XORI) {
        return tcg_match_xori(type, val);
    } else if (ct & TCG_CT_CONST_CMPI) {
        return tcg_match_cmpi(type, val);
    }

    return 0;
}
560
48bb3750
RH
561/* Emit instructions according to the given instruction format. */
562
/* Emit a 2-byte RR-format insn: 8-bit opcode, two 4-bit register fields. */
static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}
567
/* Emit a 4-byte RRE-format insn: 16-bit opcode, 8 unused bits, then
   the two 4-bit register fields. */
static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}
573
96a9f093
RH
/* Emit a 4-byte RRF-format insn; like RRE but with an extra 4-bit
   mask/modifier field m3 (e.g. the condition mask for LOCR/LOCGR). */
static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}
579
48bb3750
RH
/* Emit a 4-byte RI-format insn with a 16-bit immediate.  The opcode
   enum values carry a zero nibble where r1 belongs (e.g. 0xa70b), so
   the register can simply be OR'd into bits 23..20. */
static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}
584
/* Emit a 6-byte RIL-format insn: 16-bit opcode half (with r1 OR'd into
   its zero nibble, e.g. 0xc209) followed by a 32-bit immediate. */
static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}
590
/* Emit a 4-byte RS-format insn: 8-bit opcode, r1, r3, base register b2,
   and an unsigned 12-bit displacement. */
static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}
597
/* Emit a 6-byte RSY-format insn.  The 16-bit opcode is split: high byte
   first, low byte last.  The 20-bit signed displacement is stored as
   DL (low 12 bits) and DH (high 8 bits) fields. */
static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}
605
606#define tcg_out_insn_RX tcg_out_insn_RS
607#define tcg_out_insn_RXY tcg_out_insn_RSY
608
609/* Emit an opcode with "type-checking" of the format. */
610#define tcg_out_insn(S, FMT, OP, ...) \
611 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
612
613
/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    /* Note the RSY operand order (r1, b2, r3, disp): the shift amount
       is base register + displacement, and SRC goes in the r3 slot. */
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}
620
/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    /* The 32-bit RS shifts modify DEST in place; there is no separate
       source operand, so the r3 field is 0. */
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}
627
628static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
629{
630 if (src != dst) {
631 if (type == TCG_TYPE_I32) {
632 tcg_out_insn(s, RR, LR, dst, src);
633 } else {
634 tcg_out_insn(s, RRE, LGR, dst, src);
635 }
636 }
637}
638
/* load a register with an immediate value */
/* Tries progressively longer encodings: one 32-bit insn, one 48-bit
   insn (with ext-imm), a pc-relative LARL, then multi-insn sequences. */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    /* LOAD LOGICAL IMMEDIATE insns for each 16-bit slice of a 64-bit reg. */
    static const S390Opcode lli_insns[4] = {
        RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
    };

    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        /* Only the low 32 bits matter; keep uval zero-extended and
           sval sign-extended views of the same value. */
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go. */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    /* A value with all set bits within one 16-bit slice loads with LLI*. */
    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can load it in one go. */
    if (facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            /* ">> 31 >> 1" is a shift by 32 written to be valid even
               where a literal shift count of 32 would be suspect. */
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
            return;
        }
    }

    /* Try for PC-relative address load; LARL requires an even value. */
    if ((sval & 1) == 0) {
        ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    }

    /* If extended immediates are not present, then we may have to issue
       several instructions to load the low 32 bits. */
    if (!(facilities & FACILITY_EXT_IMM)) {
        /* A 32-bit unsigned value can be loaded in 2 insns.  And given
           that the lli_insns loop above did not succeed, we know that
           both insns are required. */
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RI, LLILL, ret, uval);
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }

        /* If all high bits are set, the value can be loaded in 2 or 3 insns.
           We first want to make sure that all the high bits get set.  With
           luck the low 16-bits can be considered negative to perform that for
           free, otherwise we load an explicit -1. */
        if (sval >> 31 >> 1 == -1) {
            if (uval & 0x8000) {
                tcg_out_insn(s, RI, LGHI, ret, uval);
            } else {
                tcg_out_insn(s, RI, LGHI, ret, -1);
                tcg_out_insn(s, RI, IILL, ret, uval);
            }
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }
    }

    /* If we get here, both the high and low parts have non-zero bits. */

    /* Recurse to load the lower 32-bits. */
    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);

    /* Insert data into the high 32-bits. */
    uval = uval >> 31 >> 1;
    if (facilities & FACILITY_EXT_IMM) {
        if (uval < 0x10000) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        } else if ((uval & 0xffff) == 0) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        } else {
            tcg_out_insn(s, RIL, IIHF, ret, uval);
        }
    } else {
        /* Insert each non-zero 16-bit slice separately. */
        if (uval & 0xffff) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        }
        if (uval & 0xffff0000) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        }
    }
}
746
747
/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  The xor/sub
           trick sign-extends the low 20 bits. */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in. */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    /* Prefer the shorter RX encoding when one exists and the offset
       fits its unsigned 12-bit displacement. */
    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
778
48bb3750 779
2827822e 780/* load data without address translation or endianness conversion */
48bb3750 781static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 782 TCGReg base, intptr_t ofs)
2827822e 783{
48bb3750
RH
784 if (type == TCG_TYPE_I32) {
785 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
786 } else {
787 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
788 }
2827822e
AG
789}
790
48bb3750 791static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 792 TCGReg base, intptr_t ofs)
2827822e 793{
48bb3750
RH
794 if (type == TCG_TYPE_I32) {
795 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
796 } else {
797 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
798 }
799}
800
/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    intptr_t addr = (intptr_t)abs;

    /* Try a single pc-relative load (LRL/LGRL): requires an even target
       address and a halfword-scaled displacement that fits in 32 bits. */
    if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
        ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }

    /* Otherwise materialize the address with the low 16 bits masked off,
       and fold those low bits into the load's displacement, using DEST
       itself as the base register. */
    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}
821
f0bffc27
RH
/* Emit RISBG (rotate then insert selected bits): rotate SRC left by OFS,
   insert bit positions msb..lsb into DEST; Z set zeroes the other bits.
   The 16-bit opcode is split across the first and last halfwords. */
static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}
830
48bb3750
RH
/* Sign-extend the low 8 bits of SRC into DEST. */
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* Single insn with the extended-immediate facility. */
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        /* Shift left then arithmetic-shift right to replicate bit 7.
           The 3-operand 64-bit SLLG avoids a separate move when
           dest != src. */
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}
850
/* Zero-extend the low 8 bits of SRC into DEST. */
static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* Single insn with the extended-immediate facility. */
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    /* Otherwise AND with 0xff, building the mask in whichever register
       is not holding the source value. */
    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}
870
/* Sign-extend the low 16 bits of SRC into DEST. */
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* Single insn with the extended-immediate facility. */
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        /* Shift left then arithmetic-shift right to replicate bit 15. */
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}
890
/* Zero-extend the low 16 bits of SRC into DEST. */
static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* Single insn with the extended-immediate facility. */
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    /* Otherwise AND with 0xffff, building the mask in whichever register
       is not holding the source value. */
    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}
910
/* Sign-extend the low 32 bits of SRC into DEST (LOAD, sign-extending). */
static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}
915
/* Zero-extend the low 32 bits of SRC into DEST (LOAD LOGICAL). */
static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}
920
f0bffc27
RH
/* Accept bit patterns like these:
   0....01....1
   1....10....0
   1..10..01..1
   0..01..10..0
   i.e. a single contiguous run of ones, possibly wrapping around the
   word boundary — exactly what RISBG's selected-bit range can express.
   Copied from gcc sources.  */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t first, second;

    /* Inverting doesn't change the number of 0<->1 transitions, so
       normalize to a value whose LSB is zero. */
    if (c & 1) {
        c = ~c;
    }
    /* All zeros (or all ones before normalizing) is not a valid mask. */
    if (c == 0) {
        return false;
    }
    /* Lowest set bit marks the first transition. */
    first = c & -c;
    /* Flip and clear everything below the first transition, then find
       the second transition, if any. */
    c = ~c;
    c &= -first;
    second = c & -c;
    /* Valid iff no second run exists (c == 0) or the remaining bits
       from `second` upward are all ones. */
    return c == -second;
}
950
547ec121
RH
/* Emit RISBG to compute "out = in & val", where VAL has already been
   validated by risbg_mask(): a single contiguous run of ones, possibly
   wrapping around bit 0/63. */
static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb.  Both bit 63 and
           bit 0 are set, so the run of ones wraps; measure the interior
           run of zeros via the complement. */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    /* No rotation (ofs = 0); zero all bits outside msb..lsb (z = 1). */
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}
964
/* Emit "dest &= val", choosing among zero-extension insns, 16/32-bit
   AND-immediates, RISBG, or a full constant load plus register AND. */
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    /* Mask of the bits that are significant for this operation type;
       for I32 the high 32 bits of VAL are don't-cares. */
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions. */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }
    if (facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.  The don't-care
       bits (~valid) are treated as ones so they never block a match. */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go. */
    if (facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }
    /* A contiguous (possibly wrapped) run of ones can be done via RISBG. */
    if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    /* Fall back to loading the constant. */
    tcg_out_movi(s, type, TCG_TMP0, val);
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}
1024
1025static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1026{
1027 static const S390Opcode oi_insns[4] = {
1028 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1029 };
1030 static const S390Opcode nif_insns[2] = {
1031 RIL_OILF, RIL_OIHF
1032 };
1033
1034 int i;
1035
1036 /* Look for no-op. */
1037 if (val == 0) {
1038 return;
1039 }
1040
1041 if (facilities & FACILITY_EXT_IMM) {
1042 /* Try all 32-bit insns that can perform it in one go. */
1043 for (i = 0; i < 4; i++) {
1044 tcg_target_ulong mask = (0xffffull << i*16);
1045 if ((val & mask) != 0 && (val & ~mask) == 0) {
1046 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1047 return;
1048 }
1049 }
1050
1051 /* Try all 48-bit insns that can perform it in one go. */
1052 for (i = 0; i < 2; i++) {
1053 tcg_target_ulong mask = (0xffffffffull << i*32);
1054 if ((val & mask) != 0 && (val & ~mask) == 0) {
1055 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1056 return;
1057 }
1058 }
1059
1060 /* Perform the OR via sequential modifications to the high and
1061 low parts. Do this via recursion to handle 16-bit vs 32-bit
1062 masks in each half. */
1063 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1064 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1065 } else {
1066 /* With no extended-immediate facility, we don't need to be so
1067 clever. Just iterate over the insns and mask in the constant. */
1068 for (i = 0; i < 4; i++) {
1069 tcg_target_ulong mask = (0xffffull << i*16);
1070 if ((val & mask) != 0) {
1071 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1072 }
1073 }
1074 }
1075}
1076
1077static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1078{
1079 /* Perform the xor by parts. */
1080 if (val & 0xffffffff) {
1081 tcg_out_insn(s, RIL, XILF, dest, val);
1082 }
1083 if (val > 0xffffffff) {
1084 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1085 }
1086}
1087
/* Emit a comparison of R1 against C2 (a constant iff C2CONST), using a
   signed or unsigned compare as required by condition C.  Returns the
   s390 condition-code value the caller should test.  */
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, int c2const)
{
    bool is_unsigned = is_unsigned_cond(c);
    if (c2const) {
        if (c2 == 0) {
            /* Compare against zero via LOAD AND TEST; the resulting CC
               values differ from a compare, hence the separate table.  */
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LTR, r1, r1);
            } else {
                tcg_out_insn(s, RRE, LTGR, r1, r1);
            }
            return tcg_cond_to_ltr_cond[c];
        } else {
            /* Compare against a non-zero immediate.  */
            if (is_unsigned) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
                }
            } else {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
                }
            }
        }
    } else {
        /* Register-register compare.  */
        if (is_unsigned) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CLR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CLGR, r1, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CGR, r1, c2);
            }
        }
    }
    return tcg_cond_to_s390_cond[c];
}
1132
/* Emit dest = (c1 <cond> c2) ? 1 : 0.  Conditions that can exploit the
   carry-based ADD LOGICAL WITH CARRY trick are rewritten into the GT/GEU
   forms; everything else falls through to compare + conditional load
   (or a branch when LOAD ON CONDITION is unavailable).  */
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc;

    switch (cond) {
    case TCG_COND_GTU:
    case TCG_COND_GT:
    do_greater:
        /* The result of a compare has CC=2 for GT and CC=3 unused.
           ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.  */
        tgen_cmp(s, type, cond, c1, c2, c2const);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_GEU:
    do_geu:
        /* We need "real" carry semantics, so use SUBTRACT LOGICAL
           instead of COMPARE LOGICAL.  This needs an extra move.  */
        tcg_out_mov(s, type, TCG_TMP0, c1);
        if (c2const) {
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, SLR, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2);
            }
            /* Zero dest only after the subtract, in case dest == c2.  */
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        }
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_LEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
        /* Swap operands so that we can use GEU/GTU/GT.  */
        if (c2const) {
            tcg_out_movi(s, type, TCG_TMP0, c2);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
        } else {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
        }
        if (cond == TCG_COND_LEU) {
            goto do_geu;
        }
        cond = tcg_swap_cond(cond);
        goto do_greater;

    case TCG_COND_NE:
        /* X != 0 is X > 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
            goto do_greater;
        }
        break;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0 is 0 >= X.  */
        if (c2const && c2 == 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
            goto do_geu;
        }
        break;

    default:
        break;
    }

    /* Generic path: compare, then materialize 0/1 from the CC.  */
    cc = tgen_cmp(s, type, cond, c1, c2, c2const);
    if (facilities & FACILITY_LOAD_ON_COND) {
        /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
        tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
    } else {
        /* Emit: d = 1; if (cc) goto over; d = 0; over:  */
        tcg_out_movi(s, type, dest, 1);
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_movi(s, type, dest, 0);
    }
}
1228
96a9f093
RH
1229static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1230 TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
1231{
1232 int cc;
1233 if (facilities & FACILITY_LOAD_ON_COND) {
1234 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1235 tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
1236 } else {
1237 c = tcg_invert_cond(c);
1238 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1239
1240 /* Emit: if (cc) goto over; dest = r3; over: */
1241 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1242 tcg_out_insn(s, RRE, LGR, dest, r3);
1243 }
1244}
1245
d5690ea4
RH
1246bool tcg_target_deposit_valid(int ofs, int len)
1247{
1248 return (facilities & FACILITY_GEN_INST_EXT) != 0;
1249}
1250
1251static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1252 int ofs, int len)
1253{
1254 int lsb = (63 - ofs);
1255 int msb = lsb - (len - 1);
f0bffc27 1256 tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
d5690ea4
RH
1257}
1258
8c081b18 1259static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
48bb3750 1260{
8c081b18
RH
1261 ptrdiff_t off = dest - s->code_ptr;
1262 if (off == (int16_t)off) {
48bb3750
RH
1263 tcg_out_insn(s, RI, BRC, cc, off);
1264 } else if (off == (int32_t)off) {
1265 tcg_out_insn(s, RIL, BRCL, cc, off);
1266 } else {
8c081b18 1267 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
48bb3750
RH
1268 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1269 }
1270}
1271
/* Emit a conditional branch to label L.  If the label is not yet bound,
   emit a relocation on the displacement field instead of a value.  */
static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else if (USE_LONG_BRANCHES) {
        /* 32-bit displacement, fixed up later via R_390_PC32DBL.  */
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, -2);
        s->code_ptr += 2;
    } else {
        /* 16-bit displacement, fixed up later via R_390_PC16DBL.  */
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, -2);
        s->code_ptr += 1;
    }
}
1286
/* Emit a fused compare-and-branch (register form, RIE format) of R1
   against R2 with condition mask CC, branching to label L.  */
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    intptr_t off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    /* RIE format: opcode high byte | r1 | r2, 16-bit disp, mask | low byte.  */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}
1304
/* Emit a fused compare-and-branch (8-bit immediate form, RIE format) of
   R1 against I2 with condition mask CC, branching to label L.  */
static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_target_long off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    /* RIE format: opcode high byte | r1 | mask, 16-bit disp, i2 | low byte.  */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}
1322
/* Emit a conditional branch to label L after comparing R1 with C2
   (a constant iff C2CONST).  Prefers the fused compare-and-branch
   insns when the general-instructions-extension facility is present,
   falling back to separate compare and branch otherwise.  */
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;

    if (facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            /* Register-register fused compare and branch.  */
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, l);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE.  */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
            return;
        }
    }

    /* Fallback: a plain compare followed by a conditional branch.  */
    cc = tgen_cmp(s, type, c, r1, c2, c2const);
    tgen_branch(s, cc, l);
}
1373
a8111212 1374static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
48bb3750 1375{
8c081b18 1376 ptrdiff_t off = dest - s->code_ptr;
48bb3750
RH
1377 if (off == (int32_t)off) {
1378 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1379 } else {
8c081b18 1380 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
48bb3750
RH
1381 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1382 }
1383}
1384
/* Emit the memory-access portion of a qemu_ld: a single load from
   BASE+INDEX+DISP into DATA, selecting the insn by the size, signedness
   and byte-swap bits of OPC.  Byte-swapped loads that need extension
   take a second insn, since LRVH/LRV do not extend.  */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case MO_SB:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;

    case MO_UW | MO_BSWAP:
        /* swapped unsigned halfword load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16u(s, TCG_TYPE_I64, data, data);
        break;
    case MO_UW:
        tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        break;

    case MO_SW | MO_BSWAP:
        /* swapped sign-extended halfword load */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16s(s, TCG_TYPE_I64, data, data);
        break;
    case MO_SW:
        tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        break;

    case MO_UL | MO_BSWAP:
        /* swapped unsigned int load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32u(s, data, data);
        break;
    case MO_UL:
        tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        break;

    case MO_SL | MO_BSWAP:
        /* swapped sign-extended int load */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32s(s, data, data);
        break;
    case MO_SL:
        tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, LG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}
1443
/* Emit the memory-access portion of a qemu_st: a single store of DATA
   to BASE+INDEX+DISP, selecting the insn by the size and byte-swap bits
   of OPC.  The shorter RX forms are used when the displacement fits in
   an unsigned 12-bit field.  */
static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_UB:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;

    case MO_UW | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        break;
    case MO_UW:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;

    case MO_UL | MO_BSWAP:
        tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        break;
    case MO_UL:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, STG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}
1489
#if defined(CONFIG_SOFTMMU)
/* We're expecting to use a 20-bit signed offset on the tlb memory ops.
   Using the offset of the second entry in the last tlb table ensures
   that we can index all of the elements of the first entry.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
                  > 0x7ffff);

/* Load and compare a TLB entry, leaving the flags set.  Loads the TLB
   addend into R2.  Returns a register with the sanitized guest address.  */
static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                               int mem_index, bool is_ld)
{
    int s_mask = (1 << (opc & MO_SIZE)) - 1;
    int ofs, a_off;
    uint64_t tlb_mask;

    /* For aligned accesses, we check the first byte and include the alignment
       bits within the address.  For unaligned access, we check that we don't
       cross pages using the address of the last byte of the access.  */
    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
        a_off = 0;
        tlb_mask = TARGET_PAGE_MASK | s_mask;
    } else {
        a_off = s_mask;
        tlb_mask = TARGET_PAGE_MASK;
    }

    if (facilities & FACILITY_GEN_INST_EXT) {
        /* Extract the TLB index into R2 with a single rotate-and-insert.  */
        tcg_out_risbg(s, TCG_REG_R2, addr_reg,
                      64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
                      63 - CPU_TLB_ENTRY_BITS,
                      64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
        if (a_off) {
            tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
            tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
        } else {
            tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
        }
    } else {
        /* Without RISBG: shift and mask to form the index in R2, and
           form the page-masked comparison address in R3.  */
        tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
        tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
                  (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
    }

    if (is_ld) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    }
    /* Compare the masked address with the TLB tag, setting the flags.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    }

    /* Load the addend into R2, replacing the index.  */
    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);

    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_REG_R3, addr_reg);
        return TCG_REG_R3;
    }
    return addr_reg;
}
48bb3750 1557
3972ef6f
RH
1558static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1559 TCGReg data, TCGReg addr,
fb596415
RH
1560 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
1561{
1562 TCGLabelQemuLdst *label = new_ldst_label(s);
1563
1564 label->is_ld = is_ld;
3972ef6f 1565 label->oi = oi;
fb596415
RH
1566 label->datalo_reg = data;
1567 label->addrlo_reg = addr;
fb596415
RH
1568 label->raddr = raddr;
1569 label->label_ptr[0] = label_ptr;
1570}
48bb3750 1571
/* Emit the slow path for a guest load: patch the fast-path branch to
   land here, marshal arguments (env, address, oi, return address) into
   R2..R5, call the helper, move the result into place, and jump back.  */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    /* Resolve the fast-path branch to the current output position.  */
    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}
1592
/* Emit the slow path for a guest store: patch the fast-path branch to
   land here, marshal arguments (env, address, zero-extended data, oi,
   return address) into R2..R6, call the helper, and jump back.  */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    /* Resolve the fast-path branch to the current output position.  */
    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    /* Zero-extend the store data to the width the helper expects.  */
    switch (opc & MO_SIZE) {
    case MO_UB:
        tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UW:
        tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UL:
        tgen_ext32u(s, TCG_REG_R4, data_reg);
        break;
    case MO_Q:
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    default:
        tcg_abort();
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}
#else
/* User-only mode: rewrite ADDR_REG/INDEX_REG/DISP in place so the
   direct load/store helpers address guest memory relative to
   guest_base, zero-extending 32-bit guest addresses first.  */
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    /* A small guest_base fits in the 20-bit displacement field;
       otherwise it lives in the reserved base register.  */
    if (guest_base < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = guest_base;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
#endif /* CONFIG_SOFTMMU */
1645
/* Emit a complete guest load.  With CONFIG_SOFTMMU: TLB lookup, a
   conditional branch to a slow path recorded for later emission, and
   the fast-path access.  Without: a direct guest_base-relative load.  */
static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);

    /* We need to keep the offset unchanged for retranslation.  */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
1673
/* Emit a complete guest store.  With CONFIG_SOFTMMU: TLB lookup, a
   conditional branch to a slow path recorded for later emission, and
   the fast-path access.  Without: a direct guest_base-relative store.  */
static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);

    /* We need to keep the offset unchanged for retranslation.  */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
1701
/* Expand to the pair of case labels for a TCG op that exists in both
   32-bit and 64-bit variants, so one code path can handle both.  */
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
48bb3750 1705
a9751609 1706static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2827822e
AG
1707 const TCGArg *args, const int *const_args)
1708{
48bb3750 1709 S390Opcode op;
0db921e6 1710 TCGArg a0, a1, a2;
48bb3750
RH
1711
1712 switch (opc) {
1713 case INDEX_op_exit_tb:
1714 /* return value */
1715 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
8c081b18 1716 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
48bb3750
RH
1717 break;
1718
1719 case INDEX_op_goto_tb:
1720 if (s->tb_jmp_offset) {
ed3d51ec
SF
1721 /* branch displacement must be aligned for atomic patching;
1722 * see if we need to add extra nop before branch
1723 */
1724 if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1725 tcg_out16(s, NOP);
1726 }
a10c64e0
RH
1727 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
1728 s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
1729 s->code_ptr += 2;
48bb3750
RH
1730 } else {
1731 /* load address stored at s->tb_next + args[0] */
1732 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
1733 /* and go there */
1734 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1735 }
8c081b18 1736 s->tb_next_offset[args[0]] = tcg_current_code_size(s);
48bb3750
RH
1737 break;
1738
48bb3750
RH
1739 OP_32_64(ld8u):
1740 /* ??? LLC (RXY format) is only present with the extended-immediate
1741 facility, whereas LLGC is always present. */
1742 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1743 break;
1744
1745 OP_32_64(ld8s):
1746 /* ??? LB is no smaller than LGB, so no point to using it. */
1747 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1748 break;
1749
1750 OP_32_64(ld16u):
1751 /* ??? LLH (RXY format) is only present with the extended-immediate
1752 facility, whereas LLGH is always present. */
1753 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1754 break;
1755
1756 case INDEX_op_ld16s_i32:
1757 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1758 break;
1759
1760 case INDEX_op_ld_i32:
1761 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1762 break;
1763
1764 OP_32_64(st8):
1765 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1766 TCG_REG_NONE, args[2]);
1767 break;
1768
1769 OP_32_64(st16):
1770 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1771 TCG_REG_NONE, args[2]);
1772 break;
1773
1774 case INDEX_op_st_i32:
1775 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1776 break;
1777
1778 case INDEX_op_add_i32:
0db921e6 1779 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1780 if (const_args[2]) {
0db921e6
RH
1781 do_addi_32:
1782 if (a0 == a1) {
1783 if (a2 == (int16_t)a2) {
1784 tcg_out_insn(s, RI, AHI, a0, a2);
1785 break;
1786 }
1787 if (facilities & FACILITY_EXT_IMM) {
1788 tcg_out_insn(s, RIL, AFI, a0, a2);
1789 break;
1790 }
1791 }
1792 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1793 } else if (a0 == a1) {
1794 tcg_out_insn(s, RR, AR, a0, a2);
48bb3750 1795 } else {
0db921e6 1796 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
1797 }
1798 break;
1799 case INDEX_op_sub_i32:
0db921e6 1800 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1801 if (const_args[2]) {
0db921e6
RH
1802 a2 = -a2;
1803 goto do_addi_32;
48bb3750 1804 }
0db921e6 1805 tcg_out_insn(s, RR, SR, args[0], args[2]);
48bb3750
RH
1806 break;
1807
1808 case INDEX_op_and_i32:
1809 if (const_args[2]) {
07ff7983 1810 tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
48bb3750
RH
1811 } else {
1812 tcg_out_insn(s, RR, NR, args[0], args[2]);
1813 }
1814 break;
1815 case INDEX_op_or_i32:
1816 if (const_args[2]) {
1817 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1818 } else {
1819 tcg_out_insn(s, RR, OR, args[0], args[2]);
1820 }
1821 break;
1822 case INDEX_op_xor_i32:
1823 if (const_args[2]) {
1824 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1825 } else {
1826 tcg_out_insn(s, RR, XR, args[0], args[2]);
1827 }
1828 break;
1829
1830 case INDEX_op_neg_i32:
1831 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1832 break;
1833
1834 case INDEX_op_mul_i32:
1835 if (const_args[2]) {
1836 if ((int32_t)args[2] == (int16_t)args[2]) {
1837 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1838 } else {
1839 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1840 }
1841 } else {
1842 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1843 }
1844 break;
1845
1846 case INDEX_op_div2_i32:
1847 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1848 break;
1849 case INDEX_op_divu2_i32:
1850 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1851 break;
1852
1853 case INDEX_op_shl_i32:
1854 op = RS_SLL;
1855 do_shift32:
1856 if (const_args[2]) {
1857 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1858 } else {
1859 tcg_out_sh32(s, op, args[0], args[2], 0);
1860 }
1861 break;
1862 case INDEX_op_shr_i32:
1863 op = RS_SRL;
1864 goto do_shift32;
1865 case INDEX_op_sar_i32:
1866 op = RS_SRA;
1867 goto do_shift32;
1868
1869 case INDEX_op_rotl_i32:
1870 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1871 if (const_args[2]) {
1872 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1873 } else {
1874 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1875 }
1876 break;
1877 case INDEX_op_rotr_i32:
1878 if (const_args[2]) {
1879 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1880 TCG_REG_NONE, (32 - args[2]) & 31);
1881 } else {
1882 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1883 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1884 }
1885 break;
1886
1887 case INDEX_op_ext8s_i32:
1888 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1889 break;
1890 case INDEX_op_ext16s_i32:
1891 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1892 break;
1893 case INDEX_op_ext8u_i32:
1894 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1895 break;
1896 case INDEX_op_ext16u_i32:
1897 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1898 break;
1899
1900 OP_32_64(bswap16):
1901 /* The TCG bswap definition requires bits 0-47 already be zero.
1902 Thus we don't need the G-type insns to implement bswap16_i64. */
1903 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1904 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1905 break;
1906 OP_32_64(bswap32):
1907 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1908 break;
1909
3790b918 1910 case INDEX_op_add2_i32:
ad19b358
RH
1911 if (const_args[4]) {
1912 tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
1913 } else {
1914 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1915 }
3790b918
RH
1916 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1917 break;
1918 case INDEX_op_sub2_i32:
ad19b358
RH
1919 if (const_args[4]) {
1920 tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
1921 } else {
1922 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1923 }
3790b918
RH
1924 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1925 break;
1926
48bb3750 1927 case INDEX_op_br:
bec16311 1928 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
48bb3750
RH
1929 break;
1930
1931 case INDEX_op_brcond_i32:
1932 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
bec16311 1933 args[1], const_args[1], arg_label(args[3]));
48bb3750
RH
1934 break;
1935 case INDEX_op_setcond_i32:
1936 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1937 args[2], const_args[2]);
1938 break;
96a9f093
RH
1939 case INDEX_op_movcond_i32:
1940 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
1941 args[2], const_args[2], args[3]);
1942 break;
48bb3750 1943
f24efee4 1944 case INDEX_op_qemu_ld_i32:
48bb3750 1945 /* ??? Technically we can use a non-extending instruction. */
f24efee4 1946 case INDEX_op_qemu_ld_i64:
59227d5d 1947 tcg_out_qemu_ld(s, args[0], args[1], args[2]);
48bb3750 1948 break;
f24efee4
RH
1949 case INDEX_op_qemu_st_i32:
1950 case INDEX_op_qemu_st_i64:
59227d5d 1951 tcg_out_qemu_st(s, args[0], args[1], args[2]);
48bb3750
RH
1952 break;
1953
48bb3750
RH
1954 case INDEX_op_ld16s_i64:
1955 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1956 break;
1957 case INDEX_op_ld32u_i64:
1958 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1959 break;
1960 case INDEX_op_ld32s_i64:
1961 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1962 break;
1963 case INDEX_op_ld_i64:
1964 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1965 break;
1966
1967 case INDEX_op_st32_i64:
1968 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1969 break;
1970 case INDEX_op_st_i64:
1971 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1972 break;
1973
1974 case INDEX_op_add_i64:
0db921e6 1975 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 1976 if (const_args[2]) {
0db921e6
RH
1977 do_addi_64:
1978 if (a0 == a1) {
1979 if (a2 == (int16_t)a2) {
1980 tcg_out_insn(s, RI, AGHI, a0, a2);
1981 break;
1982 }
1983 if (facilities & FACILITY_EXT_IMM) {
1984 if (a2 == (int32_t)a2) {
1985 tcg_out_insn(s, RIL, AGFI, a0, a2);
1986 break;
1987 } else if (a2 == (uint32_t)a2) {
1988 tcg_out_insn(s, RIL, ALGFI, a0, a2);
1989 break;
1990 } else if (-a2 == (uint32_t)-a2) {
1991 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
1992 break;
1993 }
1994 }
1995 }
1996 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1997 } else if (a0 == a1) {
1998 tcg_out_insn(s, RRE, AGR, a0, a2);
48bb3750 1999 } else {
0db921e6 2000 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
2001 }
2002 break;
2003 case INDEX_op_sub_i64:
0db921e6 2004 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2005 if (const_args[2]) {
0db921e6
RH
2006 a2 = -a2;
2007 goto do_addi_64;
48bb3750
RH
2008 } else {
2009 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
2010 }
2011 break;
2012
2013 case INDEX_op_and_i64:
2014 if (const_args[2]) {
07ff7983 2015 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
48bb3750
RH
2016 } else {
2017 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
2018 }
2019 break;
2020 case INDEX_op_or_i64:
2021 if (const_args[2]) {
2022 tgen64_ori(s, args[0], args[2]);
2023 } else {
2024 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
2025 }
2026 break;
2027 case INDEX_op_xor_i64:
2028 if (const_args[2]) {
2029 tgen64_xori(s, args[0], args[2]);
2030 } else {
2031 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
2032 }
2033 break;
2034
2035 case INDEX_op_neg_i64:
2036 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2037 break;
2038 case INDEX_op_bswap64_i64:
2039 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2040 break;
2041
2042 case INDEX_op_mul_i64:
2043 if (const_args[2]) {
2044 if (args[2] == (int16_t)args[2]) {
2045 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
2046 } else {
2047 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
2048 }
2049 } else {
2050 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
2051 }
2052 break;
2053
2054 case INDEX_op_div2_i64:
2055 /* ??? We get an unnecessary sign-extension of the dividend
2056 into R3 with this definition, but as we do in fact always
2057 produce both quotient and remainder using INDEX_op_div_i64
2058 instead requires jumping through even more hoops. */
2059 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
2060 break;
2061 case INDEX_op_divu2_i64:
2062 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
2063 break;
36017dc6
RH
2064 case INDEX_op_mulu2_i64:
2065 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
2066 break;
48bb3750
RH
2067
2068 case INDEX_op_shl_i64:
2069 op = RSY_SLLG;
2070 do_shift64:
2071 if (const_args[2]) {
2072 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2073 } else {
2074 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2075 }
2076 break;
2077 case INDEX_op_shr_i64:
2078 op = RSY_SRLG;
2079 goto do_shift64;
2080 case INDEX_op_sar_i64:
2081 op = RSY_SRAG;
2082 goto do_shift64;
2083
2084 case INDEX_op_rotl_i64:
2085 if (const_args[2]) {
2086 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2087 TCG_REG_NONE, args[2]);
2088 } else {
2089 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2090 }
2091 break;
2092 case INDEX_op_rotr_i64:
2093 if (const_args[2]) {
2094 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2095 TCG_REG_NONE, (64 - args[2]) & 63);
2096 } else {
2097 /* We can use the smaller 32-bit negate because only the
2098 low 6 bits are examined for the rotate. */
2099 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2100 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2101 }
2102 break;
2103
2104 case INDEX_op_ext8s_i64:
2105 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2106 break;
2107 case INDEX_op_ext16s_i64:
2108 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2109 break;
4f2331e5 2110 case INDEX_op_ext_i32_i64:
48bb3750
RH
2111 case INDEX_op_ext32s_i64:
2112 tgen_ext32s(s, args[0], args[1]);
2113 break;
2114 case INDEX_op_ext8u_i64:
2115 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2116 break;
2117 case INDEX_op_ext16u_i64:
2118 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2119 break;
4f2331e5 2120 case INDEX_op_extu_i32_i64:
48bb3750
RH
2121 case INDEX_op_ext32u_i64:
2122 tgen_ext32u(s, args[0], args[1]);
2123 break;
2124
3790b918 2125 case INDEX_op_add2_i64:
ad19b358
RH
2126 if (const_args[4]) {
2127 if ((int64_t)args[4] >= 0) {
2128 tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2129 } else {
2130 tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2131 }
2132 } else {
2133 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2134 }
3790b918
RH
2135 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2136 break;
2137 case INDEX_op_sub2_i64:
ad19b358
RH
2138 if (const_args[4]) {
2139 if ((int64_t)args[4] >= 0) {
2140 tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2141 } else {
2142 tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2143 }
2144 } else {
2145 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2146 }
3790b918
RH
2147 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2148 break;
2149
48bb3750
RH
2150 case INDEX_op_brcond_i64:
2151 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
bec16311 2152 args[1], const_args[1], arg_label(args[3]));
48bb3750
RH
2153 break;
2154 case INDEX_op_setcond_i64:
2155 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2156 args[2], const_args[2]);
2157 break;
96a9f093
RH
2158 case INDEX_op_movcond_i64:
2159 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2160 args[2], const_args[2], args[3]);
2161 break;
48bb3750 2162
d5690ea4
RH
2163 OP_32_64(deposit):
2164 tgen_deposit(s, args[0], args[2], args[3], args[4]);
2165 break;
2166
96d0ee7f
RH
2167 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2168 case INDEX_op_mov_i64:
2169 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2170 case INDEX_op_movi_i64:
2171 case INDEX_op_call: /* Always emitted via tcg_out_call. */
48bb3750 2172 default:
48bb3750
RH
2173 tcg_abort();
2174 }
2827822e
AG
2175}
2176
48bb3750
RH
/* Operand-constraint table for every TCG opcode this backend accepts.
   Each entry pairs an opcode with one constraint string per operand
   (outputs first, then inputs), terminated by a { -1 } sentinel and
   registered via tcg_add_target_add_op_defs() in tcg_target_init().

   Constraint letters used below:
     "r"       any general register
     "0", "1"  operand must alias output operand 0/1 (2-address insns)
     "a", "b"  fixed registers for the implicit even/odd pairs of the
               divide/multiply instructions (the code above hard-codes
               TCG_REG_R2 for DSGR/DLGR/MLGR)
     "L"       register suitable for qemu_ld/st addressing
     "i"       any immediate
   NOTE(review): the remaining letters ("K", "O", "X", "A", "C", "R")
   are s390-specific immediate/register classes parsed by this file's
   constraint parser, which is outside this view — confirm their exact
   ranges there before relying on them.  */
static const TCGTargetOpDef s390_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    /* 32-bit loads and stores: address register + value register.  */
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* 32-bit arithmetic.  add can use a 3-address LA form, hence "r"
       rather than "0" for the first input.  */
    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "rK" } },

    /* Divide produces quotient/remainder in a fixed register pair.  */
    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },

    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "rO" } },
    { INDEX_op_xor_i32, { "r", "0", "rX" } },

    { INDEX_op_neg_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "0", "Ri" } },
    { INDEX_op_shr_i32, { "r", "0", "Ri" } },
    { INDEX_op_sar_i32, { "r", "0", "Ri" } },

    { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    /* Double-word add/sub with carry; low parts alias outputs.  */
    { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i32, { "r", "rC" } },
    { INDEX_op_setcond_i32, { "r", "r", "rC" } },
    { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i32, { "r", "0", "r" } },

    /* Guest memory accesses; "L" keeps helper-call argument registers
       free for the softmmu slow path.  */
    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L" } },

    /* 64-bit loads and stores.  */
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    /* 64-bit arithmetic; mirrors the 32-bit constraints above.  */
    { INDEX_op_add_i64, { "r", "r", "ri" } },
    { INDEX_op_sub_i64, { "r", "0", "ri" } },
    { INDEX_op_mul_i64, { "r", "0", "rK" } },

    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },

    { INDEX_op_and_i64, { "r", "0", "ri" } },
    { INDEX_op_or_i64, { "r", "0", "rO" } },
    { INDEX_op_xor_i64, { "r", "0", "rX" } },

    { INDEX_op_neg_i64, { "r", "r" } },

    /* 64-bit shifts use the 3-address RSY forms, hence "r" not "0".  */
    { INDEX_op_shl_i64, { "r", "r", "Ri" } },
    { INDEX_op_shr_i64, { "r", "r", "Ri" } },
    { INDEX_op_sar_i64, { "r", "r", "Ri" } },

    { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_ext_i32_i64, { "r", "r" } },
    { INDEX_op_extu_i32_i64, { "r", "r" } },

    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i64, { "r", "rC" } },
    { INDEX_op_setcond_i64, { "r", "r", "rC" } },
    { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i64, { "r", "0", "r" } },

    /* Table terminator.  */
    { -1 },
};
2290
48bb3750
RH
/* Probe the CPU's facility list and record it in the file-scope
   'facilities' bitmask (consulted elsewhere, e.g. FACILITY_EXT_IMM
   checks).  Called once from tcg_target_init().  */
static void query_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway.
       If it is absent, 'facilities' is left untouched (all bits clear),
       so every optional-facility test below fails safe.  */
    if (hwcap & HWCAP_S390_STFLE) {
        /* STFLE takes its operand count in r0 and stores the list at
           the address in r1; pin the variables to those registers.  */
        register int r0 __asm__("0");
        register void *r1 __asm__("1");

        /* stfle 0(%r1) -- emitted as a raw opcode (.word) rather than
           by mnemonic, presumably so older assemblers that do not know
           STFLE can still build this file; confirm before changing.  */
        r1 = &facilities;
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
    }
}
2307
/* One-time backend initialization: detect CPU facilities, then tell the
   common TCG code which registers exist, which are call-clobbered,
   which are reserved, and which operand constraints apply.  */
static void tcg_target_init(TCGContext *s)
{
    query_facilities();

    /* All 16 general registers are usable for both 32- and 64-bit
       values (this backend only generates 64-bit code, see top of
       file).  */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);

    /* Per the s390x ABI, r0-r5 are call-clobbered.  */
    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The r6 register is technically call-saved, but it's also a parameter
       register, so it can get killed by setup for the qemu_st helper. */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    /* The return register can be considered call-clobbered. */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    /* Registers the allocator must never hand out.  */
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so we better avoid it for now */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    tcg_add_target_add_op_defs(s390_op_defs);
}
2336
/* Total stack frame the prologue allocates: the ABI-mandated register
   save area / backchain offset, space for outgoing call arguments, and
   the CPU temp buffer used by tcg_set_frame() below.  */
#define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
                          + TCG_STATIC_CALL_ARGS_SIZE           \
                          + CPU_TEMP_BUF_NLONGS * sizeof(long)))
2340
/* Emit the host prologue/epilogue that brackets every generated TB:
   save the call-saved registers, allocate FRAME_SIZE of stack, load
   the CPU env pointer, and jump to the TB whose address arrives in the
   second argument register.  The code after tb_ret_addr is the common
   epilogue that TBs return through.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    /* Reserve the temp-buffer portion of the frame for TCG spills.  */
    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifndef CONFIG_SOFTMMU
    /* User-mode: keep guest_base in a dedicated register unless it is
       small enough to fold into a displacement (0x80000 looks like the
       20-bit signed displacement limit -- confirm against the long-
       displacement insn encodings used by the ld/st paths).  */
    if (guest_base >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* First C argument = CPU env pointer.  */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    /* Everything below is the epilogue; TBs exit by branching here.  */
    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}
f167dc37
RH
2373
/* Layout of the DWARF .debug_frame blob handed to tcg_register_jit_int:
   common header plus the FDE instruction bytes built below.  */
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];   /* DW_CFA_def_cfa + uleb128 frame size */
    uint8_t fde_reg_ofs[18];  /* DW_CFA_offset pairs for %r6..%r14 */
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

/* ELF machine type reported for the generated JIT code.  */
#define ELF_HOST_MACHINE EM_S390
2384
/* Static unwind description matching tcg_target_qemu_prologue exactly:
   CFA = %r15 + FRAME_SIZE, return address in %r14, and %r6..%r14 saved
   by the STMG at offsets 48..112 from the entry %r15.  If the prologue
   changes, this table must change with it.  */
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 8,                /* sleb128 8 */
    .h.cie.return_column = TCG_REG_R14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ...  */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
    }
};
2413
/* Register the JIT code buffer with the common debug-info machinery,
   attaching the static unwind frame defined above so debuggers can
   unwind through generated code.  */
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}