tcg/s390/tcg-target.c
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
 27#include "tcg-be-ldst.h"
 28
29/* We only support generating code for 64-bit mode. */
30#if TCG_TARGET_REG_BITS != 64
31#error "unsupported code generation mode"
32#endif
33
34#include "elf.h"
35
36/* ??? The translation blocks produced by TCG are generally small enough to
37 be entirely reachable with a 16-bit displacement. Leaving the option for
38 a 32-bit displacement here Just In Case. */
39#define USE_LONG_BRANCHES 0
40
41#define TCG_CT_CONST_MULI 0x100
42#define TCG_CT_CONST_ORI 0x200
43#define TCG_CT_CONST_XORI 0x400
44#define TCG_CT_CONST_CMPI 0x800
 45#define TCG_CT_CONST_ADLI 0x1000
46
 47/* In several places within the instruction set, 0 means "no register"
48 rather than TCG_REG_R0. */
49#define TCG_REG_NONE 0
50
 51/* A scratch register that may be used throughout the backend. */
52#define TCG_TMP0 TCG_REG_R14
53
54#ifdef CONFIG_USE_GUEST_BASE
55#define TCG_GUEST_BASE_REG TCG_REG_R13
56#else
57#define TCG_GUEST_BASE_REG TCG_REG_R0
58#endif
59
60#ifndef GUEST_BASE
61#define GUEST_BASE 0
62#endif
63
64
65/* All of the following instructions are prefixed with their instruction
66 format, and are defined as 8- or 16-bit quantities, even when the two
67 halves of the 16-bit quantity may appear 32 bits apart in the insn.
68 This makes it easy to copy the values from the tables in Appendix B. */
69typedef enum S390Opcode {
70 RIL_AFI = 0xc209,
71 RIL_AGFI = 0xc208,
 72 RIL_ALFI = 0xc20b,
73 RIL_ALGFI = 0xc20a,
74 RIL_BRASL = 0xc005,
75 RIL_BRCL = 0xc004,
76 RIL_CFI = 0xc20d,
77 RIL_CGFI = 0xc20c,
78 RIL_CLFI = 0xc20f,
79 RIL_CLGFI = 0xc20e,
80 RIL_IIHF = 0xc008,
81 RIL_IILF = 0xc009,
82 RIL_LARL = 0xc000,
83 RIL_LGFI = 0xc001,
84 RIL_LGRL = 0xc408,
85 RIL_LLIHF = 0xc00e,
86 RIL_LLILF = 0xc00f,
87 RIL_LRL = 0xc40d,
88 RIL_MSFI = 0xc201,
89 RIL_MSGFI = 0xc200,
90 RIL_NIHF = 0xc00a,
91 RIL_NILF = 0xc00b,
92 RIL_OIHF = 0xc00c,
93 RIL_OILF = 0xc00d,
 94 RIL_SLFI = 0xc205,
 95 RIL_SLGFI = 0xc204,
96 RIL_XIHF = 0xc006,
97 RIL_XILF = 0xc007,
98
99 RI_AGHI = 0xa70b,
100 RI_AHI = 0xa70a,
101 RI_BRC = 0xa704,
102 RI_IIHH = 0xa500,
103 RI_IIHL = 0xa501,
104 RI_IILH = 0xa502,
105 RI_IILL = 0xa503,
106 RI_LGHI = 0xa709,
107 RI_LLIHH = 0xa50c,
108 RI_LLIHL = 0xa50d,
109 RI_LLILH = 0xa50e,
110 RI_LLILL = 0xa50f,
111 RI_MGHI = 0xa70d,
112 RI_MHI = 0xa70c,
113 RI_NIHH = 0xa504,
114 RI_NIHL = 0xa505,
115 RI_NILH = 0xa506,
116 RI_NILL = 0xa507,
117 RI_OIHH = 0xa508,
118 RI_OIHL = 0xa509,
119 RI_OILH = 0xa50a,
120 RI_OILL = 0xa50b,
121
122 RIE_CGIJ = 0xec7c,
123 RIE_CGRJ = 0xec64,
124 RIE_CIJ = 0xec7e,
125 RIE_CLGRJ = 0xec65,
126 RIE_CLIJ = 0xec7f,
127 RIE_CLGIJ = 0xec7d,
128 RIE_CLRJ = 0xec77,
129 RIE_CRJ = 0xec76,
 130 RIE_RISBG = 0xec55,
131
132 RRE_AGR = 0xb908,
133 RRE_ALGR = 0xb90a,
134 RRE_ALCR = 0xb998,
135 RRE_ALCGR = 0xb988,
136 RRE_CGR = 0xb920,
137 RRE_CLGR = 0xb921,
138 RRE_DLGR = 0xb987,
139 RRE_DLR = 0xb997,
140 RRE_DSGFR = 0xb91d,
141 RRE_DSGR = 0xb90d,
142 RRE_LGBR = 0xb906,
143 RRE_LCGR = 0xb903,
144 RRE_LGFR = 0xb914,
145 RRE_LGHR = 0xb907,
146 RRE_LGR = 0xb904,
147 RRE_LLGCR = 0xb984,
148 RRE_LLGFR = 0xb916,
149 RRE_LLGHR = 0xb985,
150 RRE_LRVR = 0xb91f,
151 RRE_LRVGR = 0xb90f,
152 RRE_LTGR = 0xb902,
 153 RRE_MLGR = 0xb986,
154 RRE_MSGR = 0xb90c,
155 RRE_MSR = 0xb252,
156 RRE_NGR = 0xb980,
157 RRE_OGR = 0xb981,
158 RRE_SGR = 0xb909,
159 RRE_SLGR = 0xb90b,
160 RRE_SLBR = 0xb999,
161 RRE_SLBGR = 0xb989,
162 RRE_XGR = 0xb982,
163
164 RRF_LOCR = 0xb9f2,
165 RRF_LOCGR = 0xb9e2,
166
 167 RR_AR = 0x1a,
 168 RR_ALR = 0x1e,
169 RR_BASR = 0x0d,
170 RR_BCR = 0x07,
171 RR_CLR = 0x15,
172 RR_CR = 0x19,
173 RR_DR = 0x1d,
174 RR_LCR = 0x13,
175 RR_LR = 0x18,
176 RR_LTR = 0x12,
177 RR_NR = 0x14,
178 RR_OR = 0x16,
179 RR_SR = 0x1b,
 180 RR_SLR = 0x1f,
181 RR_XR = 0x17,
182
183 RSY_RLL = 0xeb1d,
184 RSY_RLLG = 0xeb1c,
185 RSY_SLLG = 0xeb0d,
186 RSY_SRAG = 0xeb0a,
187 RSY_SRLG = 0xeb0c,
188
189 RS_SLL = 0x89,
190 RS_SRA = 0x8a,
191 RS_SRL = 0x88,
192
193 RXY_AG = 0xe308,
194 RXY_AY = 0xe35a,
195 RXY_CG = 0xe320,
196 RXY_CY = 0xe359,
 197 RXY_LAY = 0xe371,
198 RXY_LB = 0xe376,
199 RXY_LG = 0xe304,
200 RXY_LGB = 0xe377,
201 RXY_LGF = 0xe314,
202 RXY_LGH = 0xe315,
203 RXY_LHY = 0xe378,
204 RXY_LLGC = 0xe390,
205 RXY_LLGF = 0xe316,
206 RXY_LLGH = 0xe391,
207 RXY_LMG = 0xeb04,
208 RXY_LRV = 0xe31e,
209 RXY_LRVG = 0xe30f,
210 RXY_LRVH = 0xe31f,
211 RXY_LY = 0xe358,
212 RXY_STCY = 0xe372,
213 RXY_STG = 0xe324,
214 RXY_STHY = 0xe370,
215 RXY_STMG = 0xeb24,
216 RXY_STRV = 0xe33e,
217 RXY_STRVG = 0xe32f,
218 RXY_STRVH = 0xe33f,
219 RXY_STY = 0xe350,
220
221 RX_A = 0x5a,
222 RX_C = 0x59,
223 RX_L = 0x58,
 224 RX_LA = 0x41,
225 RX_LH = 0x48,
226 RX_ST = 0x50,
227 RX_STC = 0x42,
228 RX_STH = 0x40,
229} S390Opcode;
230
231#ifndef NDEBUG
232static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
233 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
 234 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
235};
236#endif
237
238/* Since R6 is a potential argument register, choose it last of the
239 call-saved registers. Likewise prefer the call-clobbered registers
240 in reverse order to maximize the chance of avoiding the arguments. */
 241static const int tcg_target_reg_alloc_order[] = {
 242 /* Call saved registers. */
243 TCG_REG_R13,
244 TCG_REG_R12,
245 TCG_REG_R11,
246 TCG_REG_R10,
247 TCG_REG_R9,
248 TCG_REG_R8,
249 TCG_REG_R7,
250 TCG_REG_R6,
 251 /* Call clobbered registers. */
252 TCG_REG_R14,
253 TCG_REG_R0,
254 TCG_REG_R1,
 255 /* Argument registers, in reverse order of allocation. */
256 TCG_REG_R5,
257 TCG_REG_R4,
258 TCG_REG_R3,
259 TCG_REG_R2,
260};
261
262static const int tcg_target_call_iarg_regs[] = {
263 TCG_REG_R2,
264 TCG_REG_R3,
265 TCG_REG_R4,
266 TCG_REG_R5,
267 TCG_REG_R6,
268};
269
270static const int tcg_target_call_oarg_regs[] = {
 271 TCG_REG_R2,
272};
273
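/* These are BRC/BCR condition-code masks: mask bit 8 selects CC 0,
   4 selects CC 1, 2 selects CC 2, and 1 selects CC 3. */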
274#define S390_CC_EQ 8
275#define S390_CC_LT 4
276#define S390_CC_GT 2
277#define S390_CC_OV 1
278#define S390_CC_NE (S390_CC_LT | S390_CC_GT)
279#define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
280#define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
281#define S390_CC_NEVER 0
282#define S390_CC_ALWAYS 15
283
284/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
 285static const uint8_t tcg_cond_to_s390_cond[] = {
286 [TCG_COND_EQ] = S390_CC_EQ,
287 [TCG_COND_NE] = S390_CC_NE,
288 [TCG_COND_LT] = S390_CC_LT,
289 [TCG_COND_LE] = S390_CC_LE,
290 [TCG_COND_GT] = S390_CC_GT,
291 [TCG_COND_GE] = S390_CC_GE,
292 [TCG_COND_LTU] = S390_CC_LT,
293 [TCG_COND_LEU] = S390_CC_LE,
294 [TCG_COND_GTU] = S390_CC_GT,
295 [TCG_COND_GEU] = S390_CC_GE,
296};
297
298/* Condition codes that result from a LOAD AND TEST. Here, we have no
299 unsigned instruction variation, however since the test is vs zero we
300 can re-map the outcomes appropriately. */
 301static const uint8_t tcg_cond_to_ltr_cond[] = {
302 [TCG_COND_EQ] = S390_CC_EQ,
303 [TCG_COND_NE] = S390_CC_NE,
304 [TCG_COND_LT] = S390_CC_LT,
305 [TCG_COND_LE] = S390_CC_LE,
306 [TCG_COND_GT] = S390_CC_GT,
307 [TCG_COND_GE] = S390_CC_GE,
308 [TCG_COND_LTU] = S390_CC_NEVER,
309 [TCG_COND_LEU] = S390_CC_EQ,
310 [TCG_COND_GTU] = S390_CC_NE,
311 [TCG_COND_GEU] = S390_CC_ALWAYS,
312};
313
314#ifdef CONFIG_SOFTMMU
315static void * const qemu_ld_helpers[16] = {
316 [MO_UB] = helper_ret_ldub_mmu,
317 [MO_SB] = helper_ret_ldsb_mmu,
318 [MO_LEUW] = helper_le_lduw_mmu,
319 [MO_LESW] = helper_le_ldsw_mmu,
320 [MO_LEUL] = helper_le_ldul_mmu,
321 [MO_LESL] = helper_le_ldsl_mmu,
322 [MO_LEQ] = helper_le_ldq_mmu,
323 [MO_BEUW] = helper_be_lduw_mmu,
324 [MO_BESW] = helper_be_ldsw_mmu,
325 [MO_BEUL] = helper_be_ldul_mmu,
326 [MO_BESL] = helper_be_ldsl_mmu,
327 [MO_BEQ] = helper_be_ldq_mmu,
328};
329
330static void * const qemu_st_helpers[16] = {
331 [MO_UB] = helper_ret_stb_mmu,
332 [MO_LEUW] = helper_le_stw_mmu,
333 [MO_LEUL] = helper_le_stl_mmu,
334 [MO_LEQ] = helper_le_stq_mmu,
335 [MO_BEUW] = helper_be_stw_mmu,
336 [MO_BEUL] = helper_be_stl_mmu,
337 [MO_BEQ] = helper_be_stq_mmu,
 338};
 339#endif
 340
 341static tcg_insn_unit *tb_ret_addr;
342
343/* A list of relevant facilities used by this translator. Some of these
344 are required for proper operation, and these are checked at startup. */
345
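/* The facility bits below follow the STFLE numbering, which counts bits
   from the most-significant end of the doubleword, hence the (63 - n)
   shifts. */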
346#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
347#define FACILITY_LONG_DISP (1ULL << (63 - 18))
348#define FACILITY_EXT_IMM (1ULL << (63 - 21))
349#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
 350#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
351
352static uint64_t facilities;
 353
 354static void patch_reloc(tcg_insn_unit *code_ptr, int type,
 355 intptr_t value, intptr_t addend)
 356{
357 intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1);
358 assert(addend == -2);
359
360 switch (type) {
361 case R_390_PC16DBL:
362 assert(pcrel2 == (int16_t)pcrel2);
 363 tcg_patch16(code_ptr, pcrel2);
364 break;
365 case R_390_PC32DBL:
366 assert(pcrel2 == (int32_t)pcrel2);
 367 tcg_patch32(code_ptr, pcrel2);
368 break;
369 default:
370 tcg_abort();
371 break;
372 }
373}
374
375/* parse target specific constraints */
376static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
377{
378 const char *ct_str = *pct_str;
379
380 switch (ct_str[0]) {
381 case 'r': /* all registers */
382 ct->ct |= TCG_CT_REG;
383 tcg_regset_set32(ct->u.regs, 0, 0xffff);
384 break;
385 case 'R': /* not R0 */
386 ct->ct |= TCG_CT_REG;
387 tcg_regset_set32(ct->u.regs, 0, 0xffff);
388 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
389 break;
390 case 'L': /* qemu_ld/st constraint */
391 ct->ct |= TCG_CT_REG;
392 tcg_regset_set32(ct->u.regs, 0, 0xffff);
393 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
394 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
 395 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
396 break;
397 case 'a': /* force R2 for division */
398 ct->ct |= TCG_CT_REG;
399 tcg_regset_clear(ct->u.regs);
400 tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
401 break;
402 case 'b': /* force R3 for division */
403 ct->ct |= TCG_CT_REG;
404 tcg_regset_clear(ct->u.regs);
405 tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
406 break;
407 case 'A':
408 ct->ct |= TCG_CT_CONST_ADLI;
409 break;
410 case 'K':
411 ct->ct |= TCG_CT_CONST_MULI;
412 break;
413 case 'O':
414 ct->ct |= TCG_CT_CONST_ORI;
415 break;
416 case 'X':
417 ct->ct |= TCG_CT_CONST_XORI;
418 break;
419 case 'C':
420 ct->ct |= TCG_CT_CONST_CMPI;
421 break;
422 default:
423 return -1;
424 }
425 ct_str++;
426 *pct_str = ct_str;
427
428 return 0;
429}
430
431/* Immediates to be used with logical OR. This is an optimization only,
432 since a full 64-bit immediate OR can always be performed with 4 sequential
433 OI[LH][LH] instructions. What we're looking for is immediates that we
434 can load efficiently, and the immediate load plus the reg-reg OR is
435 smaller than the sequential OI's. */
436
 437static int tcg_match_ori(TCGType type, tcg_target_long val)
438{
439 if (facilities & FACILITY_EXT_IMM) {
 440 if (type == TCG_TYPE_I32) {
441 /* All 32-bit ORs can be performed with 1 48-bit insn. */
442 return 1;
443 }
444 }
445
446 /* Look for negative values. These are best to load with LGHI. */
447 if (val < 0) {
448 if (val == (int16_t)val) {
449 return 0;
450 }
451 if (facilities & FACILITY_EXT_IMM) {
452 if (val == (int32_t)val) {
453 return 0;
454 }
455 }
456 }
457
458 return 1;
459}
460
461/* Immediates to be used with logical XOR. This is almost, but not quite,
462 only an optimization. XOR with immediate is only supported with the
463 extended-immediate facility. That said, there are a few patterns for
464 which it is better to load the value into a register first. */
465
 466static int tcg_match_xori(TCGType type, tcg_target_long val)
467{
468 if ((facilities & FACILITY_EXT_IMM) == 0) {
469 return 0;
470 }
471
 472 if (type == TCG_TYPE_I32) {
473 /* All 32-bit XORs can be performed with 1 48-bit insn. */
474 return 1;
475 }
476
477 /* Look for negative values. These are best to load with LGHI. */
478 if (val < 0 && val == (int32_t)val) {
479 return 0;
480 }
481
482 return 1;
483}
484
 485/* Immediates to be used with comparisons. */
486
 487static int tcg_match_cmpi(TCGType type, tcg_target_long val)
488{
489 if (facilities & FACILITY_EXT_IMM) {
490 /* The COMPARE IMMEDIATE instruction is available. */
 491 if (type == TCG_TYPE_I32) {
492 /* We have a 32-bit immediate and can compare against anything. */
493 return 1;
494 } else {
495 /* ??? We have no insight here into whether the comparison is
496 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
497 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
498 a 32-bit unsigned immediate. If we were to use the (semi)
499 obvious "val == (int32_t)val" we would be enabling unsigned
500 comparisons vs very large numbers. The only solution is to
501 take the intersection of the ranges. */
502 /* ??? Another possible solution is to simply lie and allow all
503 constants here and force the out-of-range values into a temp
504 register in tgen_cmp when we have knowledge of the actual
505 comparison code in use. */
506 return val >= 0 && val <= 0x7fffffff;
507 }
508 } else {
509 /* Only the LOAD AND TEST instruction is available. */
510 return val == 0;
511 }
512}
513
514/* Immediates to be used with add2/sub2. */
515
516static int tcg_match_add2i(TCGType type, tcg_target_long val)
517{
518 if (facilities & FACILITY_EXT_IMM) {
519 if (type == TCG_TYPE_I32) {
520 return 1;
521 } else if (val >= -0xffffffffll && val <= 0xffffffffll) {
522 return 1;
523 }
524 }
525 return 0;
526}
527
 528/* Test if a constant matches the constraint. */
 529static int tcg_target_const_match(tcg_target_long val, TCGType type,
 530 const TCGArgConstraint *arg_ct)
 531{
532 int ct = arg_ct->ct;
533
534 if (ct & TCG_CT_CONST) {
535 return 1;
536 }
537
 538 if (type == TCG_TYPE_I32) {
539 val = (int32_t)val;
540 }
541
542 /* The following are mutually exclusive. */
 543 if (ct & TCG_CT_CONST_MULI) {
544 /* Immediates that may be used with multiply. If we have the
545 general-instruction-extensions, then we have MULTIPLY SINGLE
546 IMMEDIATE with a signed 32-bit, otherwise we have only
547 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
548 if (facilities & FACILITY_GEN_INST_EXT) {
549 return val == (int32_t)val;
550 } else {
551 return val == (int16_t)val;
552 }
553 } else if (ct & TCG_CT_CONST_ADLI) {
554 return tcg_match_add2i(type, val);
 555 } else if (ct & TCG_CT_CONST_ORI) {
 556 return tcg_match_ori(type, val);
 557 } else if (ct & TCG_CT_CONST_XORI) {
 558 return tcg_match_xori(type, val);
 559 } else if (ct & TCG_CT_CONST_CMPI) {
 560 return tcg_match_cmpi(type, val);
561 }
562
563 return 0;
564}
565
566/* Emit instructions according to the given instruction format. */
567
568static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
569{
570 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
571}
572
573static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
574 TCGReg r1, TCGReg r2)
575{
576 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
577}
578
579static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
580 TCGReg r1, TCGReg r2, int m3)
581{
582 tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
583}
584
585static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
586{
587 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
588}
589
590static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
591{
592 tcg_out16(s, op | (r1 << 4));
593 tcg_out32(s, i2);
594}
595
596static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
597 TCGReg b2, TCGReg r3, int disp)
598{
599 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
600 | (disp & 0xfff));
601}
602
603static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
604 TCGReg b2, TCGReg r3, int disp)
605{
606 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
607 tcg_out32(s, (op & 0xff) | (b2 << 28)
608 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
609}
610
611#define tcg_out_insn_RX tcg_out_insn_RS
612#define tcg_out_insn_RXY tcg_out_insn_RSY
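/* RX and RXY instructions share the field layout of RS and RSY, with the
   index register occupying the r3 slot, so the RS/RSY emitters above can
   be reused directly. */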
613
614/* Emit an opcode with "type-checking" of the format. */
615#define tcg_out_insn(S, FMT, OP, ...) \
616 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
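/* For example, tcg_out_insn(s, RRE, LGR, dst, src) expands to
   tcg_out_insn_RRE(s, RRE_LGR, dst, src), pairing each format's emitter
   with an opcode enumerator of the matching format at compile time. */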
617
618
619/* emit 64-bit shifts */
620static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
621 TCGReg src, TCGReg sh_reg, int sh_imm)
622{
623 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
624}
625
626/* emit 32-bit shifts */
627static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
628 TCGReg sh_reg, int sh_imm)
629{
630 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
631}
632
633static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
634{
635 if (src != dst) {
636 if (type == TCG_TYPE_I32) {
637 tcg_out_insn(s, RR, LR, dst, src);
638 } else {
639 tcg_out_insn(s, RRE, LGR, dst, src);
640 }
641 }
642}
643
 644/* load a register with an immediate value */
645static void tcg_out_movi(TCGContext *s, TCGType type,
646 TCGReg ret, tcg_target_long sval)
 647{
648 static const S390Opcode lli_insns[4] = {
649 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
650 };
651
652 tcg_target_ulong uval = sval;
653 int i;
654
655 if (type == TCG_TYPE_I32) {
656 uval = (uint32_t)sval;
657 sval = (int32_t)sval;
658 }
659
660 /* Try all 32-bit insns that can load it in one go. */
661 if (sval >= -0x8000 && sval < 0x8000) {
662 tcg_out_insn(s, RI, LGHI, ret, sval);
663 return;
664 }
665
666 for (i = 0; i < 4; i++) {
667 tcg_target_long mask = 0xffffull << i*16;
668 if ((uval & mask) == uval) {
669 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
670 return;
671 }
672 }
673
674 /* Try all 48-bit insns that can load it in one go. */
675 if (facilities & FACILITY_EXT_IMM) {
676 if (sval == (int32_t)sval) {
677 tcg_out_insn(s, RIL, LGFI, ret, sval);
678 return;
679 }
680 if (uval <= 0xffffffff) {
681 tcg_out_insn(s, RIL, LLILF, ret, uval);
682 return;
683 }
684 if ((uval & 0xffffffff) == 0) {
685 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
686 return;
687 }
688 }
689
690 /* Try for PC-relative address load. */
691 if ((sval & 1) == 0) {
 692 ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
693 if (off == (int32_t)off) {
694 tcg_out_insn(s, RIL, LARL, ret, off);
695 return;
696 }
697 }
698
699 /* If extended immediates are not present, then we may have to issue
700 several instructions to load the low 32 bits. */
701 if (!(facilities & FACILITY_EXT_IMM)) {
702 /* A 32-bit unsigned value can be loaded in 2 insns. And given
703 that the lli_insns loop above did not succeed, we know that
704 both insns are required. */
705 if (uval <= 0xffffffff) {
706 tcg_out_insn(s, RI, LLILL, ret, uval);
707 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
708 return;
709 }
710
711 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
712 We first want to make sure that all the high bits get set. With
713 luck the low 16-bits can be considered negative to perform that for
714 free, otherwise we load an explicit -1. */
715 if (sval >> 31 >> 1 == -1) {
716 if (uval & 0x8000) {
717 tcg_out_insn(s, RI, LGHI, ret, uval);
718 } else {
719 tcg_out_insn(s, RI, LGHI, ret, -1);
720 tcg_out_insn(s, RI, IILL, ret, uval);
721 }
722 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
723 return;
724 }
725 }
726
727 /* If we get here, both the high and low parts have non-zero bits. */
728
729 /* Recurse to load the lower 32-bits. */
 730 tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);
731
732 /* Insert data into the high 32-bits. */
733 uval = uval >> 31 >> 1;
734 if (facilities & FACILITY_EXT_IMM) {
735 if (uval < 0x10000) {
736 tcg_out_insn(s, RI, IIHL, ret, uval);
737 } else if ((uval & 0xffff) == 0) {
738 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
739 } else {
740 tcg_out_insn(s, RIL, IIHF, ret, uval);
741 }
742 } else {
743 if (uval & 0xffff) {
744 tcg_out_insn(s, RI, IIHL, ret, uval);
745 }
746 if (uval & 0xffff0000) {
747 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
748 }
749 }
750}
751
752
753/* Emit a load/store type instruction. Inputs are:
754 DATA: The register to be loaded or stored.
755 BASE+OFS: The effective address.
756 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
757 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
758
759static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
760 TCGReg data, TCGReg base, TCGReg index,
761 tcg_target_long ofs)
762{
763 if (ofs < -0x80000 || ofs >= 0x80000) {
764 /* Combine the low 20 bits of the offset with the actual load insn;
765 the high 44 bits must come from an immediate load. */
766 tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
767 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
768 ofs = low;
769
770 /* If we were already given an index register, add it in. */
771 if (index != TCG_REG_NONE) {
772 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
773 }
774 index = TCG_TMP0;
775 }
776
777 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
778 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
779 } else {
780 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
781 }
782}
783
 784
 785/* load data without address translation or endianness conversion */
 786static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
 787 TCGReg base, intptr_t ofs)
 788{
789 if (type == TCG_TYPE_I32) {
790 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
791 } else {
792 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
793 }
794}
795
 796static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
 797 TCGReg base, intptr_t ofs)
 798{
799 if (type == TCG_TYPE_I32) {
800 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
801 } else {
802 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
803 }
804}
805
806/* load data from an absolute host address */
807static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
808{
 809 intptr_t addr = (intptr_t)abs;
 810
811 if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
812 ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
813 if (disp == (int32_t)disp) {
814 if (type == TCG_TYPE_I32) {
815 tcg_out_insn(s, RIL, LRL, dest, disp);
816 } else {
817 tcg_out_insn(s, RIL, LGRL, dest, disp);
818 }
819 return;
820 }
821 }
822
823 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
824 tcg_out_ld(s, type, dest, dest, addr & 0xffff);
825}
826
827static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
828 int msb, int lsb, int ofs, int z)
829{
830 /* Format RIE-f */
831 tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
832 tcg_out16(s, (msb << 8) | (z << 7) | lsb);
833 tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
834}
835
836static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
837{
838 if (facilities & FACILITY_EXT_IMM) {
839 tcg_out_insn(s, RRE, LGBR, dest, src);
840 return;
841 }
842
843 if (type == TCG_TYPE_I32) {
844 if (dest == src) {
845 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
846 } else {
847 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
848 }
849 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
850 } else {
851 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
852 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
853 }
854}
855
856static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
857{
858 if (facilities & FACILITY_EXT_IMM) {
859 tcg_out_insn(s, RRE, LLGCR, dest, src);
860 return;
861 }
862
863 if (dest == src) {
864 tcg_out_movi(s, type, TCG_TMP0, 0xff);
865 src = TCG_TMP0;
866 } else {
867 tcg_out_movi(s, type, dest, 0xff);
868 }
869 if (type == TCG_TYPE_I32) {
870 tcg_out_insn(s, RR, NR, dest, src);
871 } else {
872 tcg_out_insn(s, RRE, NGR, dest, src);
873 }
874}
875
876static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
877{
878 if (facilities & FACILITY_EXT_IMM) {
879 tcg_out_insn(s, RRE, LGHR, dest, src);
880 return;
881 }
882
883 if (type == TCG_TYPE_I32) {
884 if (dest == src) {
885 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
886 } else {
887 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
888 }
889 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
890 } else {
891 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
892 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
893 }
894}
895
896static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
897{
898 if (facilities & FACILITY_EXT_IMM) {
899 tcg_out_insn(s, RRE, LLGHR, dest, src);
900 return;
901 }
902
903 if (dest == src) {
904 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
905 src = TCG_TMP0;
906 } else {
907 tcg_out_movi(s, type, dest, 0xffff);
908 }
909 if (type == TCG_TYPE_I32) {
910 tcg_out_insn(s, RR, NR, dest, src);
911 } else {
912 tcg_out_insn(s, RRE, NGR, dest, src);
913 }
914}
915
916static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
917{
918 tcg_out_insn(s, RRE, LGFR, dest, src);
919}
920
921static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
922{
923 tcg_out_insn(s, RRE, LLGFR, dest, src);
924}
925
926/* Accept bit patterns like these:
927 0....01....1
928 1....10....0
929 1..10..01..1
930 0..01..10..0
931 Copied from gcc sources. */
932static inline bool risbg_mask(uint64_t c)
933{
934 uint64_t lsb;
935 /* We don't change the number of transitions by inverting,
936 so make sure we start with the LSB zero. */
937 if (c & 1) {
938 c = ~c;
939 }
940 /* Reject all zeros or all ones. */
941 if (c == 0) {
942 return false;
943 }
944 /* Find the first transition. */
945 lsb = c & -c;
946 /* Invert to look for a second transition. */
947 c = ~c;
948 /* Erase the first transition. */
949 c &= -lsb;
950 /* Find the second transition, if any. */
951 lsb = c & -c;
952 /* Match if all the bits are 1's, or if c is zero. */
953 return c == -lsb;
954}
955
956static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
957{
958 int msb, lsb;
959 if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
960 /* Achieve wraparound by swapping msb and lsb. */
961 msb = 64 - ctz64(~val);
962 lsb = clz64(~val) - 1;
963 } else {
964 msb = clz64(val);
965 lsb = 63 - ctz64(val);
966 }
967 tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
968}
969
 970static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
971{
972 static const S390Opcode ni_insns[4] = {
973 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
974 };
975 static const S390Opcode nif_insns[2] = {
976 RIL_NILF, RIL_NIHF
977 };
 978 uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
979 int i;
980
 981 /* Look for the zero-extensions. */
 982 if ((val & valid) == 0xffffffff) {
983 tgen_ext32u(s, dest, dest);
984 return;
985 }
 986 if (facilities & FACILITY_EXT_IMM) {
 987 if ((val & valid) == 0xff) {
988 tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
989 return;
990 }
 991 if ((val & valid) == 0xffff) {
992 tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
993 return;
994 }
 995 }
 996
997 /* Try all 32-bit insns that can perform it in one go. */
998 for (i = 0; i < 4; i++) {
999 tcg_target_ulong mask = ~(0xffffull << i*16);
1000 if (((val | ~valid) & mask) == mask) {
1001 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
1002 return;
 1003 }
 1004 }
 1005
1006 /* Try all 48-bit insns that can perform it in one go. */
1007 if (facilities & FACILITY_EXT_IMM) {
1008 for (i = 0; i < 2; i++) {
1009 tcg_target_ulong mask = ~(0xffffffffull << i*32);
1010 if (((val | ~valid) & mask) == mask) {
1011 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1012 return;
1013 }
1014 }
 1015 }
 1016 if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
 1017 tgen_andi_risbg(s, dest, dest, val);
1018 return;
1019 }
 1020
1021 /* Fall back to loading the constant. */
1022 tcg_out_movi(s, type, TCG_TMP0, val);
1023 if (type == TCG_TYPE_I32) {
1024 tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
 1025 } else {
 1026 tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
1027 }
1028}
1029
1030static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1031{
1032 static const S390Opcode oi_insns[4] = {
1033 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1034 };
1035 static const S390Opcode nif_insns[2] = {
1036 RIL_OILF, RIL_OIHF
1037 };
1038
1039 int i;
1040
1041 /* Look for no-op. */
1042 if (val == 0) {
1043 return;
1044 }
1045
1046 if (facilities & FACILITY_EXT_IMM) {
1047 /* Try all 32-bit insns that can perform it in one go. */
1048 for (i = 0; i < 4; i++) {
1049 tcg_target_ulong mask = (0xffffull << i*16);
1050 if ((val & mask) != 0 && (val & ~mask) == 0) {
1051 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1052 return;
1053 }
1054 }
1055
1056 /* Try all 48-bit insns that can perform it in one go. */
1057 for (i = 0; i < 2; i++) {
1058 tcg_target_ulong mask = (0xffffffffull << i*32);
1059 if ((val & mask) != 0 && (val & ~mask) == 0) {
1060 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1061 return;
1062 }
1063 }
1064
1065 /* Perform the OR via sequential modifications to the high and
1066 low parts. Do this via recursion to handle 16-bit vs 32-bit
1067 masks in each half. */
1068 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1069 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1070 } else {
1071 /* With no extended-immediate facility, we don't need to be so
1072 clever. Just iterate over the insns and mask in the constant. */
1073 for (i = 0; i < 4; i++) {
1074 tcg_target_ulong mask = (0xffffull << i*16);
1075 if ((val & mask) != 0) {
1076 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1077 }
1078 }
1079 }
1080}
1081
1082static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1083{
1084 /* Perform the xor by parts. */
1085 if (val & 0xffffffff) {
1086 tcg_out_insn(s, RIL, XILF, dest, val);
1087 }
1088 if (val > 0xffffffff) {
1089 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1090 }
1091}
1092
1093static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1094 TCGArg c2, int c2const)
1095{
 1096 bool is_unsigned = is_unsigned_cond(c);
1097 if (c2const) {
1098 if (c2 == 0) {
1099 if (type == TCG_TYPE_I32) {
1100 tcg_out_insn(s, RR, LTR, r1, r1);
1101 } else {
1102 tcg_out_insn(s, RRE, LTGR, r1, r1);
1103 }
1104 return tcg_cond_to_ltr_cond[c];
1105 } else {
1106 if (is_unsigned) {
1107 if (type == TCG_TYPE_I32) {
1108 tcg_out_insn(s, RIL, CLFI, r1, c2);
1109 } else {
1110 tcg_out_insn(s, RIL, CLGFI, r1, c2);
1111 }
1112 } else {
1113 if (type == TCG_TYPE_I32) {
1114 tcg_out_insn(s, RIL, CFI, r1, c2);
1115 } else {
1116 tcg_out_insn(s, RIL, CGFI, r1, c2);
1117 }
1118 }
1119 }
1120 } else {
1121 if (is_unsigned) {
1122 if (type == TCG_TYPE_I32) {
1123 tcg_out_insn(s, RR, CLR, r1, c2);
1124 } else {
1125 tcg_out_insn(s, RRE, CLGR, r1, c2);
1126 }
1127 } else {
1128 if (type == TCG_TYPE_I32) {
1129 tcg_out_insn(s, RR, CR, r1, c2);
1130 } else {
1131 tcg_out_insn(s, RRE, CGR, r1, c2);
1132 }
1133 }
1134 }
1135 return tcg_cond_to_s390_cond[c];
1136}
1137
 1138static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
 1139 TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
 1140{
1141 int cc;
1142
1143 switch (cond) {
1144 case TCG_COND_GTU:
1145 case TCG_COND_GT:
1146 do_greater:
1147 /* The result of a compare has CC=2 for GT and CC=3 unused.
1148 ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */
1149 tgen_cmp(s, type, cond, c1, c2, c2const);
1150 tcg_out_movi(s, type, dest, 0);
1151 tcg_out_insn(s, RRE, ALCGR, dest, dest);
1152 return;
1153
1154 case TCG_COND_GEU:
1155 do_geu:
1156 /* We need "real" carry semantics, so use SUBTRACT LOGICAL
1157 instead of COMPARE LOGICAL. This needs an extra move. */
1158 tcg_out_mov(s, type, TCG_TMP0, c1);
1159 if (c2const) {
1160 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1161 if (type == TCG_TYPE_I32) {
1162 tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2);
1163 } else {
1164 tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2);
1165 }
1166 } else {
1167 if (type == TCG_TYPE_I32) {
1168 tcg_out_insn(s, RR, SLR, TCG_TMP0, c2);
1169 } else {
1170 tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2);
1171 }
1172 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1173 }
1174 tcg_out_insn(s, RRE, ALCGR, dest, dest);
1175 return;
1176
1177 case TCG_COND_LEU:
1178 case TCG_COND_LTU:
1179 case TCG_COND_LT:
1180 /* Swap operands so that we can use GEU/GTU/GT. */
1181 if (c2const) {
1182 tcg_out_movi(s, type, TCG_TMP0, c2);
1183 c2 = c1;
1184 c2const = 0;
1185 c1 = TCG_TMP0;
1186 } else {
1187 TCGReg t = c1;
1188 c1 = c2;
1189 c2 = t;
1190 }
1191 if (cond == TCG_COND_LEU) {
1192 goto do_geu;
1193 }
1194 cond = tcg_swap_cond(cond);
1195 goto do_greater;
1196
1197 case TCG_COND_NE:
1198 /* X != 0 is X > 0. */
1199 if (c2const && c2 == 0) {
1200 cond = TCG_COND_GTU;
1201 goto do_greater;
1202 }
1203 break;
1204
1205 case TCG_COND_EQ:
1206 /* X == 0 is X <= 0 is 0 >= X. */
1207 if (c2const && c2 == 0) {
1208 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0);
1209 c2 = c1;
1210 c2const = 0;
1211 c1 = TCG_TMP0;
1212 goto do_geu;
1213 }
1214 break;
 1215
1216 default:
1217 break;
1218 }
1219
1220 cc = tgen_cmp(s, type, cond, c1, c2, c2const);
1221 if (facilities & FACILITY_LOAD_ON_COND) {
1222 /* Emit: d = 0, t = 1, d = (cc ? t : d). */
1223 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1224 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
1225 tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
1226 } else {
1227 /* Emit: d = 1; if (cc) goto over; d = 0; over: */
1228 tcg_out_movi(s, type, dest, 1);
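        /* BRC offsets are counted in halfwords from the branch itself, so
           (4 + 4) >> 1 skips the 4-byte BRC plus the 4-byte load of 0. */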
1229 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1230 tcg_out_movi(s, type, dest, 0);
1231 }
1232}
1233
1234static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1235 TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
1236{
1237 int cc;
1238 if (facilities & FACILITY_LOAD_ON_COND) {
1239 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1240 tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
1241 } else {
1242 c = tcg_invert_cond(c);
1243 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1244
1245 /* Emit: if (cc) goto over; dest = r3; over: */
1246 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1247 tcg_out_insn(s, RRE, LGR, dest, r3);
1248 }
1249}
1250
1251bool tcg_target_deposit_valid(int ofs, int len)
1252{
1253 return (facilities & FACILITY_GEN_INST_EXT) != 0;
1254}
1255
1256static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1257 int ofs, int len)
1258{
1259 int lsb = (63 - ofs);
1260 int msb = lsb - (len - 1);
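    /* RISBG numbers bits from the MSB down, so a LEN-bit deposit at bit
       offset OFS (counted from the LSB) maps to the MSB..LSB range above,
       with OFS doubling as the left-rotate count that moves the source
       field into position. */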
 1261 tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
1262}
1263
 1264static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
 1265{
1266 ptrdiff_t off = dest - s->code_ptr;
1267 if (off == (int16_t)off) {
1268 tcg_out_insn(s, RI, BRC, cc, off);
1269 } else if (off == (int32_t)off) {
1270 tcg_out_insn(s, RIL, BRCL, cc, off);
1271 } else {
 1272 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1273 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1274 }
1275}
1276
 1277static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
 1278{
 1279 if (l->has_value) {
 1280 tgen_gotoi(s, cc, l->u.value_ptr);
1281 } else if (USE_LONG_BRANCHES) {
1282 tcg_out16(s, RIL_BRCL | (cc << 4));
 1283 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, -2);
 1284 s->code_ptr += 2;
1285 } else {
1286 tcg_out16(s, RI_BRC | (cc << 4));
 1287 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, -2);
 1288 s->code_ptr += 1;
1289 }
1290}
1291
1292static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
 1293 TCGReg r1, TCGReg r2, TCGLabel *l)
 1294{
 1295 intptr_t off;
1296
1297 if (l->has_value) {
 1298 off = l->u.value_ptr - s->code_ptr;
1299 } else {
1300 /* We need to keep the offset unchanged for retranslation. */
 1301 off = s->code_ptr[1];
 1302 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
1303 }
1304
1305 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1306 tcg_out16(s, off);
1307 tcg_out16(s, cc << 12 | (opc & 0xff));
1308}
1309
1310static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
 1311 TCGReg r1, int i2, TCGLabel *l)
 1312{
1313 tcg_target_long off;
1314
1315 if (l->has_value) {
 1316 off = l->u.value_ptr - s->code_ptr;
1317 } else {
1318 /* We need to keep the offset unchanged for retranslation. */
 1319 off = s->code_ptr[1];
 1320 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
1321 }
1322
1323 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1324 tcg_out16(s, off);
1325 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1326}
1327
1328static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
 1329 TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
1330{
1331 int cc;
1332
1333 if (facilities & FACILITY_GEN_INST_EXT) {
 1334 bool is_unsigned = is_unsigned_cond(c);
1335 bool in_range;
1336 S390Opcode opc;
1337
1338 cc = tcg_cond_to_s390_cond[c];
1339
1340 if (!c2const) {
1341 opc = (type == TCG_TYPE_I32
1342 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1343 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
 1344 tgen_compare_branch(s, opc, cc, r1, c2, l);
1345 return;
1346 }
1347
1348 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1349 If the immediate we've been given does not fit that range, we'll
1350 fall back to separate compare and branch instructions using the
1351 larger comparison range afforded by COMPARE IMMEDIATE. */
1352 if (type == TCG_TYPE_I32) {
1353 if (is_unsigned) {
1354 opc = RIE_CLIJ;
1355 in_range = (uint32_t)c2 == (uint8_t)c2;
1356 } else {
1357 opc = RIE_CIJ;
1358 in_range = (int32_t)c2 == (int8_t)c2;
1359 }
1360 } else {
1361 if (is_unsigned) {
1362 opc = RIE_CLGIJ;
1363 in_range = (uint64_t)c2 == (uint8_t)c2;
1364 } else {
1365 opc = RIE_CGIJ;
1366 in_range = (int64_t)c2 == (int8_t)c2;
1367 }
1368 }
1369 if (in_range) {
 1370 tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
1371 return;
1372 }
1373 }
1374
1375 cc = tgen_cmp(s, type, c, r1, c2, c2const);
 1376 tgen_branch(s, cc, l);
1377}
1378
 1379static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
 1380{
 1381 ptrdiff_t off = dest - s->code_ptr;
1382 if (off == (int32_t)off) {
1383 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1384 } else {
 1385 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1386 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1387 }
1388}
1389
 1390static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
1391 TCGReg base, TCGReg index, int disp)
1392{
 1393 switch (opc) {
 1394 case MO_UB:
1395 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1396 break;
 1397 case MO_SB:
1398 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1399 break;
1400
1401 case MO_UW | MO_BSWAP:
1402 /* swapped unsigned halfword load with upper bits zeroed */
1403 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1404 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1405 break;
 1406 case MO_UW:
1407 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1408 break;
1409
1410 case MO_SW | MO_BSWAP:
1411 /* swapped sign-extended halfword load */
1412 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1413 tgen_ext16s(s, TCG_TYPE_I64, data, data);
 1414 break;
 1415 case MO_SW:
1416 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1417 break;
1418
1419 case MO_UL | MO_BSWAP:
1420 /* swapped unsigned int load with upper bits zeroed */
1421 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1422 tgen_ext32u(s, data, data);
 1423 break;
 1424 case MO_UL:
1425 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1426 break;
1427
1428 case MO_SL | MO_BSWAP:
1429 /* swapped sign-extended int load */
1430 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1431 tgen_ext32s(s, data, data);
 1432 break;
 1433 case MO_SL:
1434 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1435 break;
1436
1437 case MO_Q | MO_BSWAP:
1438 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
 1439 break;
 1440 case MO_Q:
 1441 tcg_out_insn(s, RXY, LG, data, base, index, disp);
 1442 break;
 1443
1444 default:
1445 tcg_abort();
1446 }
1447}
1448
 1449static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
1450 TCGReg base, TCGReg index, int disp)
1451{
 1452 switch (opc) {
 1453 case MO_UB:
1454 if (disp >= 0 && disp < 0x1000) {
1455 tcg_out_insn(s, RX, STC, data, base, index, disp);
1456 } else {
1457 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1458 }
1459 break;
1460
1461 case MO_UW | MO_BSWAP:
1462 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1463 break;
 1464 case MO_UW:
 1465 if (disp >= 0 && disp < 0x1000) {
1466 tcg_out_insn(s, RX, STH, data, base, index, disp);
1467 } else {
1468 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1469 }
1470 break;
1471
1472 case MO_UL | MO_BSWAP:
1473 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1474 break;
 1475 case MO_UL:
 1476 if (disp >= 0 && disp < 0x1000) {
1477 tcg_out_insn(s, RX, ST, data, base, index, disp);
1478 } else {
1479 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1480 }
1481 break;
1482
1483 case MO_Q | MO_BSWAP:
1484 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1485 break;
 1486 case MO_Q:
 1487 tcg_out_insn(s, RXY, STG, data, base, index, disp);
 1488 break;
 1489
1490 default:
1491 tcg_abort();
1492 }
1493}
1494
1495#if defined(CONFIG_SOFTMMU)
1496/* We're expecting to use a 20-bit signed offset on the tlb memory ops.
1497 Using the offset of the second entry in the last tlb table ensures
1498 that we can index all of the elements of the first entry. */
1499QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
1500 > 0x7ffff);
1501
1502/* Load and compare a TLB entry, leaving the flags set. Loads the TLB
 1503 addend into R2. Returns a register with the sanitized guest address. */
1504static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
1505 int mem_index, bool is_ld)
 1506{
 1507 TCGMemOp s_bits = opc & MO_SIZE;
 1508 uint64_t tlb_mask = TARGET_PAGE_MASK | ((1 << s_bits) - 1);
1509 int ofs;
1510
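    /* With the general-instruction-extension facility, one RISBG extracts
       the TLB index from the address and scales it by the TLB entry size,
       and a second masks the address down to the page (keeping the low
       alignment bits) for the comparison against the TLB entry. */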
1511 if (facilities & FACILITY_GEN_INST_EXT) {
1512 tcg_out_risbg(s, TCG_REG_R2, addr_reg,
1513 64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
1514 63 - CPU_TLB_ENTRY_BITS,
1515 64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
1516 tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
 1517 } else {
1518 tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
1519 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 1520 tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, addr_reg);
1521 tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
1522 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
1523 tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
1524 }
1525
 1526 if (is_ld) {
 1527 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
1528 } else {
1529 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
 1530 }
 1531 if (TARGET_LONG_BITS == 32) {
 1532 tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
 1533 } else {
 1534 tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
1535 }
1536
1537 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1538 tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);
1539
 1540 if (TARGET_LONG_BITS == 32) {
1541 tgen_ext32u(s, TCG_REG_R3, addr_reg);
1542 return TCG_REG_R3;
 1543 }
1544 return addr_reg;
1545}
 1546
1547static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
1548 TCGReg data, TCGReg addr, int mem_index,
1549 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
1550{
1551 TCGLabelQemuLdst *label = new_ldst_label(s);
1552
1553 label->is_ld = is_ld;
1554 label->opc = opc;
1555 label->datalo_reg = data;
1556 label->addrlo_reg = addr;
1557 label->mem_index = mem_index;
1558 label->raddr = raddr;
1559 label->label_ptr[0] = label_ptr;
1560}
 1561
1562static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1563{
1564 TCGReg addr_reg = lb->addrlo_reg;
1565 TCGReg data_reg = lb->datalo_reg;
1566 TCGMemOp opc = lb->opc;
 1567
 1568 patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);
 1569
1570 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1571 if (TARGET_LONG_BITS == 64) {
1572 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1573 }
1574 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, lb->mem_index);
1575 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
1576 tcg_out_call(s, qemu_ld_helpers[opc]);
1577 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
 1578
 1579 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1580}
1581
 1582static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 1583{
1584 TCGReg addr_reg = lb->addrlo_reg;
1585 TCGReg data_reg = lb->datalo_reg;
1586 TCGMemOp opc = lb->opc;
1587
1588 patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);
1589
1590 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1591 if (TARGET_LONG_BITS == 64) {
1592 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1593 }
1594 switch (opc & MO_SIZE) {
1595 case MO_UB:
1596 tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1597 break;
1598 case MO_UW:
1599 tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1600 break;
1601 case MO_UL:
1602 tgen_ext32u(s, TCG_REG_R4, data_reg);
1603 break;
1604 case MO_Q:
1605 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1606 break;
1607 default:
1608 tcg_abort();
1609 }
1610 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, lb->mem_index);
1611 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
1612 tcg_out_call(s, qemu_st_helpers[opc]);
1613
1614 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1615}
1616#else
1617static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1618 TCGReg *index_reg, tcg_target_long *disp)
1619{
1620 if (TARGET_LONG_BITS == 32) {
1621 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1622 *addr_reg = TCG_TMP0;
1623 }
1624 if (GUEST_BASE < 0x80000) {
1625 *index_reg = TCG_REG_NONE;
1626 *disp = GUEST_BASE;
1627 } else {
1628 *index_reg = TCG_GUEST_BASE_REG;
1629 *disp = 0;
1630 }
1631}
1632#endif /* CONFIG_SOFTMMU */
1633
1634static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1635 TCGMemOp opc, int mem_index)
 1636{
1637#ifdef CONFIG_SOFTMMU
1638 tcg_insn_unit *label_ptr;
1639 TCGReg base_reg;
1640
1641 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
1642
1643 label_ptr = s->code_ptr + 1;
1644 tcg_out_insn(s, RI, BRC, S390_CC_NE, 0);
1645
1646 tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
 1647
1648 add_qemu_ldst_label(s, 1, opc, data_reg, addr_reg, mem_index,
1649 s->code_ptr, label_ptr);
 1650#else
1651 TCGReg index_reg;
1652 tcg_target_long disp;
1653
1654 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1655 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1656#endif
1657}
1658
1659static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1660 TCGMemOp opc, int mem_index)
 1661{
1662#ifdef CONFIG_SOFTMMU
1663 tcg_insn_unit *label_ptr;
1664 TCGReg base_reg;
1665
1666 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
1667
1668 label_ptr = s->code_ptr + 1;
1669 tcg_out_insn(s, RI, BRC, S390_CC_NE, 0);
1670
1671 tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
 1672
1673 add_qemu_ldst_label(s, 0, opc, data_reg, addr_reg, mem_index,
1674 s->code_ptr, label_ptr);
 1675#else
1676 TCGReg index_reg;
1677 tcg_target_long disp;
1678
1679 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1680 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1681#endif
1682}
1683
1684# define OP_32_64(x) \
1685 case glue(glue(INDEX_op_,x),_i32): \
1686 case glue(glue(INDEX_op_,x),_i64)
 1687
 1688static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1689 const TCGArg *args, const int *const_args)
1690{
 1691 S390Opcode op;
 1692 TCGArg a0, a1, a2;
1693
1694 switch (opc) {
1695 case INDEX_op_exit_tb:
1696 /* return value */
1697 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
 1698 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1699 break;
1700
1701 case INDEX_op_goto_tb:
1702 if (s->tb_jmp_offset) {
1703 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
1704 s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
1705 s->code_ptr += 2;
1706 } else {
1707 /* load address stored at s->tb_next + args[0] */
1708 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
1709 /* and go there */
1710 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1711 }
 1712 s->tb_next_offset[args[0]] = tcg_current_code_size(s);
1713 break;
1714
1715 OP_32_64(ld8u):
1716 /* ??? LLC (RXY format) is only present with the extended-immediate
1717 facility, whereas LLGC is always present. */
1718 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1719 break;
1720
1721 OP_32_64(ld8s):
1722 /* ??? LB is no smaller than LGB, so no point to using it. */
1723 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1724 break;
1725
1726 OP_32_64(ld16u):
1727 /* ??? LLH (RXY format) is only present with the extended-immediate
1728 facility, whereas LLGH is always present. */
1729 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1730 break;
1731
1732 case INDEX_op_ld16s_i32:
1733 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1734 break;
1735
1736 case INDEX_op_ld_i32:
1737 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1738 break;
1739
1740 OP_32_64(st8):
1741 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1742 TCG_REG_NONE, args[2]);
1743 break;
1744
1745 OP_32_64(st16):
1746 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1747 TCG_REG_NONE, args[2]);
1748 break;
1749
1750 case INDEX_op_st_i32:
1751 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1752 break;
1753
1754 case INDEX_op_add_i32:
 1755 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
 1756 if (const_args[2]) {
1757 do_addi_32:
1758 if (a0 == a1) {
1759 if (a2 == (int16_t)a2) {
1760 tcg_out_insn(s, RI, AHI, a0, a2);
1761 break;
1762 }
1763 if (facilities & FACILITY_EXT_IMM) {
1764 tcg_out_insn(s, RIL, AFI, a0, a2);
1765 break;
1766 }
1767 }
1768 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1769 } else if (a0 == a1) {
1770 tcg_out_insn(s, RR, AR, a0, a2);
 1771 } else {
 1772 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1773 }
1774 break;
1775 case INDEX_op_sub_i32:
 1776 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
 1777 if (const_args[2]) {
1778 a2 = -a2;
1779 goto do_addi_32;
 1780 }
 1781 tcg_out_insn(s, RR, SR, args[0], args[2]);
1782 break;
1783
1784 case INDEX_op_and_i32:
1785 if (const_args[2]) {
 1786 tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
1787 } else {
1788 tcg_out_insn(s, RR, NR, args[0], args[2]);
1789 }
1790 break;
1791 case INDEX_op_or_i32:
1792 if (const_args[2]) {
1793 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1794 } else {
1795 tcg_out_insn(s, RR, OR, args[0], args[2]);
1796 }
1797 break;
1798 case INDEX_op_xor_i32:
1799 if (const_args[2]) {
1800 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1801 } else {
1802 tcg_out_insn(s, RR, XR, args[0], args[2]);
1803 }
1804 break;
1805
1806 case INDEX_op_neg_i32:
1807 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1808 break;
1809
1810 case INDEX_op_mul_i32:
1811 if (const_args[2]) {
1812 if ((int32_t)args[2] == (int16_t)args[2]) {
1813 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1814 } else {
1815 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1816 }
1817 } else {
1818 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1819 }
1820 break;
1821
1822 case INDEX_op_div2_i32:
1823 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1824 break;
1825 case INDEX_op_divu2_i32:
1826 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1827 break;
1828
1829 case INDEX_op_shl_i32:
1830 op = RS_SLL;
1831 do_shift32:
1832 if (const_args[2]) {
1833 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1834 } else {
1835 tcg_out_sh32(s, op, args[0], args[2], 0);
1836 }
1837 break;
1838 case INDEX_op_shr_i32:
1839 op = RS_SRL;
1840 goto do_shift32;
1841 case INDEX_op_sar_i32:
1842 op = RS_SRA;
1843 goto do_shift32;
1844
1845 case INDEX_op_rotl_i32:
1846 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1847 if (const_args[2]) {
1848 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1849 } else {
1850 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1851 }
1852 break;
1853 case INDEX_op_rotr_i32:
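    /* There is no rotate-right instruction, so rotate left by the
       complementary amount, negating the count for the register case. */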
1854 if (const_args[2]) {
1855 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1856 TCG_REG_NONE, (32 - args[2]) & 31);
1857 } else {
1858 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1859 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1860 }
1861 break;
1862
1863 case INDEX_op_ext8s_i32:
1864 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1865 break;
1866 case INDEX_op_ext16s_i32:
1867 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1868 break;
1869 case INDEX_op_ext8u_i32:
1870 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1871 break;
1872 case INDEX_op_ext16u_i32:
1873 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1874 break;
1875
1876 OP_32_64(bswap16):
1877 /* The TCG bswap definition requires bits 0-47 already be zero.
1878 Thus we don't need the G-type insns to implement bswap16_i64. */
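    /* LRVR byte-reverses the whole 32-bit word, which leaves the
       swapped halfword in the upper 16 bits; the shift moves it back
       down into the low half. */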
1879 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1880 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1881 break;
1882 OP_32_64(bswap32):
1883 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1884 break;
1885
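    /* Double-word arithmetic: the logical add/subtract of the low parts
       sets the carry/borrow in the condition code, which ALCR/SLBR (and
       the 64-bit ALCGR/SLBGR) then consume for the high parts. */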
1886 case INDEX_op_add2_i32:
1887 if (const_args[4]) {
1888 tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
1889 } else {
1890 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1891 }
1892 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1893 break;
1894 case INDEX_op_sub2_i32:
1895 if (const_args[4]) {
1896 tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
1897 } else {
1898 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1899 }
1900 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1901 break;
1902
1903 case INDEX_op_br:
1904 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
1905 break;
1906
1907 case INDEX_op_brcond_i32:
1908 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
1909 args[1], const_args[1], arg_label(args[3]));
1910 break;
1911 case INDEX_op_setcond_i32:
1912 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1913 args[2], const_args[2]);
1914 break;
1915 case INDEX_op_movcond_i32:
1916 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
1917 args[2], const_args[2], args[3]);
1918 break;
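    /* Guest loads and stores go through tcg_out_qemu_ld/st, which emit
       the softmmu TLB lookup and slow-path helper call, or a direct
       GUEST_BASE-relative access for user-only builds. */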
1919
1920 case INDEX_op_qemu_ld_i32:
1921 /* ??? Technically we can use a non-extending instruction. */
1922 case INDEX_op_qemu_ld_i64:
1923 tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3]);
1924 break;
1925 case INDEX_op_qemu_st_i32:
1926 case INDEX_op_qemu_st_i64:
1927 tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]);
1928 break;
1929
1930 case INDEX_op_ld16s_i64:
1931 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1932 break;
1933 case INDEX_op_ld32u_i64:
1934 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1935 break;
1936 case INDEX_op_ld32s_i64:
1937 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1938 break;
1939 case INDEX_op_ld_i64:
1940 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1941 break;
1942
1943 case INDEX_op_st32_i64:
1944 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1945 break;
1946 case INDEX_op_st_i64:
1947 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1948 break;
1949
1950 case INDEX_op_add_i64:
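    /* The 64-bit cascade mirrors the 32-bit one: AGHI takes a signed
       16-bit immediate; with the extended-immediate facility, AGFI
       covers signed 32-bit constants and ALGFI/SLGFI add or subtract
       unsigned 32-bit ones; everything else goes through LA/LAY. */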
1951 a0 = args[0], a1 = args[1], a2 = args[2];
1952 if (const_args[2]) {
1953 do_addi_64:
1954 if (a0 == a1) {
1955 if (a2 == (int16_t)a2) {
1956 tcg_out_insn(s, RI, AGHI, a0, a2);
1957 break;
1958 }
1959 if (facilities & FACILITY_EXT_IMM) {
1960 if (a2 == (int32_t)a2) {
1961 tcg_out_insn(s, RIL, AGFI, a0, a2);
1962 break;
1963 } else if (a2 == (uint32_t)a2) {
1964 tcg_out_insn(s, RIL, ALGFI, a0, a2);
1965 break;
1966 } else if (-a2 == (uint32_t)-a2) {
1967 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
1968 break;
1969 }
1970 }
1971 }
1972 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1973 } else if (a0 == a1) {
1974 tcg_out_insn(s, RRE, AGR, a0, a2);
1975 } else {
1976 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1977 }
1978 break;
1979 case INDEX_op_sub_i64:
1980 a0 = args[0], a1 = args[1], a2 = args[2];
1981 if (const_args[2]) {
1982 a2 = -a2;
1983 goto do_addi_64;
1984 } else {
1985 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
1986 }
1987 break;
1988
1989 case INDEX_op_and_i64:
1990 if (const_args[2]) {
1991 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
1992 } else {
1993 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
1994 }
1995 break;
1996 case INDEX_op_or_i64:
1997 if (const_args[2]) {
1998 tgen64_ori(s, args[0], args[2]);
1999 } else {
2000 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
2001 }
2002 break;
2003 case INDEX_op_xor_i64:
2004 if (const_args[2]) {
2005 tgen64_xori(s, args[0], args[2]);
2006 } else {
2007 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
2008 }
2009 break;
2010
2011 case INDEX_op_neg_i64:
2012 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2013 break;
2014 case INDEX_op_bswap64_i64:
2015 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2016 break;
2017
2018 case INDEX_op_mul_i64:
2019 if (const_args[2]) {
2020 if (args[2] == (int16_t)args[2]) {
2021 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
2022 } else {
2023 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
2024 }
2025 } else {
2026 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
2027 }
2028 break;
2029
2030 case INDEX_op_div2_i64:
 2031 /* ??? We get an unnecessary sign-extension of the dividend
 2032 into R3 with this definition, but since we do in fact always
 2033 produce both quotient and remainder, using INDEX_op_div_i64
 2034 instead would require jumping through even more hoops. */
2035 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
2036 break;
2037 case INDEX_op_divu2_i64:
2038 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
2039 break;
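    /* MLGR forms the 128-bit product in the even/odd pair R2:R3, with
       the high half in R2 and the low half in R3; the multiplicand must
       already be in R3. */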
2040 case INDEX_op_mulu2_i64:
2041 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
2042 break;
2043
2044 case INDEX_op_shl_i64:
2045 op = RSY_SLLG;
2046 do_shift64:
2047 if (const_args[2]) {
2048 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2049 } else {
2050 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2051 }
2052 break;
2053 case INDEX_op_shr_i64:
2054 op = RSY_SRLG;
2055 goto do_shift64;
2056 case INDEX_op_sar_i64:
2057 op = RSY_SRAG;
2058 goto do_shift64;
2059
2060 case INDEX_op_rotl_i64:
2061 if (const_args[2]) {
2062 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2063 TCG_REG_NONE, args[2]);
2064 } else {
2065 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2066 }
2067 break;
2068 case INDEX_op_rotr_i64:
2069 if (const_args[2]) {
2070 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2071 TCG_REG_NONE, (64 - args[2]) & 63);
2072 } else {
2073 /* We can use the smaller 32-bit negate because only the
2074 low 6 bits are examined for the rotate. */
2075 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2076 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2077 }
2078 break;
2079
2080 case INDEX_op_ext8s_i64:
2081 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2082 break;
2083 case INDEX_op_ext16s_i64:
2084 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2085 break;
2086 case INDEX_op_ext32s_i64:
2087 tgen_ext32s(s, args[0], args[1]);
2088 break;
2089 case INDEX_op_ext8u_i64:
2090 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2091 break;
2092 case INDEX_op_ext16u_i64:
2093 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2094 break;
2095 case INDEX_op_ext32u_i64:
2096 tgen_ext32u(s, args[0], args[1]);
2097 break;
2098
2099 case INDEX_op_add2_i64:
2100 if (const_args[4]) {
2101 if ((int64_t)args[4] >= 0) {
2102 tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2103 } else {
2104 tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2105 }
2106 } else {
2107 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2108 }
2109 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2110 break;
2111 case INDEX_op_sub2_i64:
2112 if (const_args[4]) {
2113 if ((int64_t)args[4] >= 0) {
2114 tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2115 } else {
2116 tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2117 }
2118 } else {
2119 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2120 }
2121 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2122 break;
2123
2124 case INDEX_op_brcond_i64:
2125 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2126 args[1], const_args[1], arg_label(args[3]));
2127 break;
2128 case INDEX_op_setcond_i64:
2129 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2130 args[2], const_args[2]);
2131 break;
2132 case INDEX_op_movcond_i64:
2133 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2134 args[2], const_args[2], args[3]);
2135 break;
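    /* tgen_deposit is implemented with RISBG (rotate then insert
       selected bits), which is why the deposit ops are only advertised
       when the general-instructions-extension facility is available. */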
2136
2137 OP_32_64(deposit):
2138 tgen_deposit(s, args[0], args[2], args[3], args[4]);
2139 break;
2140
2141 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2142 case INDEX_op_mov_i64:
2143 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2144 case INDEX_op_movi_i64:
2145 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2146 default:
2147 tcg_abort();
2148 }
2149}
2150
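/* Operand constraints: "r" is any general register, "i" any immediate,
   and "0"/"1" tie an output to the register of that input.  The other
   letters are backend-specific (see target_parse_constraint earlier in
   this file); e.g. "a"/"b" pin an operand to R2/R3 for the implicit
   register pairs used by divide and multiply, and the capital letters
   accept only the immediate ranges or registers the corresponding
   instructions can encode. */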
2151static const TCGTargetOpDef s390_op_defs[] = {
2152 { INDEX_op_exit_tb, { } },
2153 { INDEX_op_goto_tb, { } },
2154 { INDEX_op_br, { } },
2155
2156 { INDEX_op_ld8u_i32, { "r", "r" } },
2157 { INDEX_op_ld8s_i32, { "r", "r" } },
2158 { INDEX_op_ld16u_i32, { "r", "r" } },
2159 { INDEX_op_ld16s_i32, { "r", "r" } },
2160 { INDEX_op_ld_i32, { "r", "r" } },
2161 { INDEX_op_st8_i32, { "r", "r" } },
2162 { INDEX_op_st16_i32, { "r", "r" } },
2163 { INDEX_op_st_i32, { "r", "r" } },
2164
2165 { INDEX_op_add_i32, { "r", "r", "ri" } },
2166 { INDEX_op_sub_i32, { "r", "0", "ri" } },
2167 { INDEX_op_mul_i32, { "r", "0", "rK" } },
2168
2169 { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
2170 { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
2171
2172 { INDEX_op_and_i32, { "r", "0", "ri" } },
2173 { INDEX_op_or_i32, { "r", "0", "rO" } },
2174 { INDEX_op_xor_i32, { "r", "0", "rX" } },
2175
2176 { INDEX_op_neg_i32, { "r", "r" } },
2177
2178 { INDEX_op_shl_i32, { "r", "0", "Ri" } },
2179 { INDEX_op_shr_i32, { "r", "0", "Ri" } },
2180 { INDEX_op_sar_i32, { "r", "0", "Ri" } },
2181
2182 { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
2183 { INDEX_op_rotr_i32, { "r", "r", "Ri" } },
2184
2185 { INDEX_op_ext8s_i32, { "r", "r" } },
2186 { INDEX_op_ext8u_i32, { "r", "r" } },
2187 { INDEX_op_ext16s_i32, { "r", "r" } },
2188 { INDEX_op_ext16u_i32, { "r", "r" } },
2189
2190 { INDEX_op_bswap16_i32, { "r", "r" } },
2191 { INDEX_op_bswap32_i32, { "r", "r" } },
2192
2193 { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } },
2194 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } },
2195
2196 { INDEX_op_brcond_i32, { "r", "rC" } },
2197 { INDEX_op_setcond_i32, { "r", "r", "rC" } },
2198 { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
2199 { INDEX_op_deposit_i32, { "r", "0", "r" } },
2200
2201 { INDEX_op_qemu_ld_i32, { "r", "L" } },
2202 { INDEX_op_qemu_ld_i64, { "r", "L" } },
2203 { INDEX_op_qemu_st_i32, { "L", "L" } },
2204 { INDEX_op_qemu_st_i64, { "L", "L" } },
2205
2206 { INDEX_op_ld8u_i64, { "r", "r" } },
2207 { INDEX_op_ld8s_i64, { "r", "r" } },
2208 { INDEX_op_ld16u_i64, { "r", "r" } },
2209 { INDEX_op_ld16s_i64, { "r", "r" } },
2210 { INDEX_op_ld32u_i64, { "r", "r" } },
2211 { INDEX_op_ld32s_i64, { "r", "r" } },
2212 { INDEX_op_ld_i64, { "r", "r" } },
2213
2214 { INDEX_op_st8_i64, { "r", "r" } },
2215 { INDEX_op_st16_i64, { "r", "r" } },
2216 { INDEX_op_st32_i64, { "r", "r" } },
2217 { INDEX_op_st_i64, { "r", "r" } },
2218
2219 { INDEX_op_add_i64, { "r", "r", "ri" } },
2220 { INDEX_op_sub_i64, { "r", "0", "ri" } },
2221 { INDEX_op_mul_i64, { "r", "0", "rK" } },
2222
2223 { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
2224 { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
2225 { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },
2226
2227 { INDEX_op_and_i64, { "r", "0", "ri" } },
2228 { INDEX_op_or_i64, { "r", "0", "rO" } },
2229 { INDEX_op_xor_i64, { "r", "0", "rX" } },
2230
2231 { INDEX_op_neg_i64, { "r", "r" } },
2232
2233 { INDEX_op_shl_i64, { "r", "r", "Ri" } },
2234 { INDEX_op_shr_i64, { "r", "r", "Ri" } },
2235 { INDEX_op_sar_i64, { "r", "r", "Ri" } },
2236
2237 { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
2238 { INDEX_op_rotr_i64, { "r", "r", "Ri" } },
2239
2240 { INDEX_op_ext8s_i64, { "r", "r" } },
2241 { INDEX_op_ext8u_i64, { "r", "r" } },
2242 { INDEX_op_ext16s_i64, { "r", "r" } },
2243 { INDEX_op_ext16u_i64, { "r", "r" } },
2244 { INDEX_op_ext32s_i64, { "r", "r" } },
2245 { INDEX_op_ext32u_i64, { "r", "r" } },
2246
2247 { INDEX_op_bswap16_i64, { "r", "r" } },
2248 { INDEX_op_bswap32_i64, { "r", "r" } },
2249 { INDEX_op_bswap64_i64, { "r", "r" } },
2250
2251 { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
2252 { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },
2253
2254 { INDEX_op_brcond_i64, { "r", "rC" } },
2255 { INDEX_op_setcond_i64, { "r", "r", "rC" } },
2256 { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
2257 { INDEX_op_deposit_i64, { "r", "0", "r" } },
2258
2259 { -1 },
2260};
2261
2262static void query_facilities(void)
2263{
2264 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2265
2266 /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
2267 is present on all 64-bit systems, but let's check for it anyway. */
2268 if (hwcap & HWCAP_S390_STFLE) {
2269 register int r0 __asm__("0");
2270 register void *r1 __asm__("1");
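        /* With r0 = 0 we request a single doubleword of facility bits,
           stored at the address in r1; afterwards r0 reports one less
           than the number of doublewords needed for the full list. */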
2271
2272 /* stfle 0(%r1) */
2273 r1 = &facilities;
2274 asm volatile(".word 0xb2b0,0x1000"
2275 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
2276 }
2277}
2278
2279static void tcg_target_init(TCGContext *s)
2280 {
2281 query_facilities();
2282
2283 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2284 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2285
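    /* Per the s390x C ABI, %r0-%r5 are call-clobbered: %r2-%r5 carry
       arguments and %r0/%r1 are scratch. */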
2286 tcg_regset_clear(tcg_target_call_clobber_regs);
2287 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2288 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2289 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2290 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2291 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2292 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
2293 /* The r6 register is technically call-saved, but it's also a parameter
2294 register, so it can get killed by setup for the qemu_st helper. */
2295 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
2296 /* The return register can be considered call-clobbered. */
2297 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2298
2299 tcg_regset_clear(s->reserved_regs);
2300 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
 2301 /* XXX many insns can't be used with R0, so we'd better avoid it for now */
2302 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2303 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2304
2305 tcg_add_target_add_op_defs(s390_op_defs);
2306}
2307
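/* The frame provides the ABI-mandated register save area and backchain
   (TCG_TARGET_CALL_STACK_OFFSET), space for outgoing helper-call
   arguments, and the temporary buffer registered with tcg_set_frame. */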
2308#define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \
2309 + TCG_STATIC_CALL_ARGS_SIZE \
2310 + CPU_TEMP_BUF_NLONGS * sizeof(long)))
2311
2312 static void tcg_target_qemu_prologue(TCGContext *s)
2313 {
2314 /* stmg %r6,%r15,48(%r15) (save registers) */
2315 tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
2316
2317 /* aghi %r15,-frame_size */
2318 tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);
2319
2320 tcg_set_frame(s, TCG_REG_CALL_STACK,
2321 TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
2322 CPU_TEMP_BUF_NLONGS * sizeof(long));
2323
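    /* A guest base below 0x80000 fits in the signed 20-bit displacement
       of RXY-format accesses, so only larger values need a dedicated
       register reserved for them. */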
2324 if (GUEST_BASE >= 0x80000) {
2325 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
2326 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2327 }
2328
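    /* The prologue is entered with the CPUArchState pointer in the first
       argument register (%r2) and the TB code address in the second (%r3). */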
2329 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2330 /* br %r3 (go to TB) */
2331 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
2332
2333 tb_ret_addr = s->code_ptr;
2334
2335 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2336 tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
2337 FRAME_SIZE + 48);
2338
2339 /* br %r14 (return) */
2340 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
2341 }
2342
2343typedef struct {
2344 DebugFrameHeader h;
2345 uint8_t fde_def_cfa[4];
2346 uint8_t fde_reg_ofs[18];
2347} DebugFrame;
2348
2349/* We're expecting a 2 byte uleb128 encoded value. */
2350QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
2351
2352#define ELF_HOST_MACHINE EM_S390
2353
2354static const DebugFrame debug_frame = {
2355 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2356 .h.cie.id = -1,
2357 .h.cie.version = 1,
2358 .h.cie.code_align = 1,
2359 .h.cie.data_align = 8, /* sleb128 8 */
2360 .h.cie.return_column = TCG_REG_R14,
2361
2362 /* Total FDE size does not include the "len" member. */
2363 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2364
2365 .fde_def_cfa = {
2366 12, TCG_REG_CALL_STACK, /* DW_CFA_def_cfa %r15, ... */
2367 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2368 (FRAME_SIZE >> 7)
2369 },
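    /* Each DW_CFA_offset pair below encodes (0x80 | regno, offset / 8),
       the 8 being the data alignment factor above; e.g. "0x86, 6" says
       %r6 is saved at CFA + 48. */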
2370 .fde_reg_ofs = {
2371 0x86, 6, /* DW_CFA_offset, %r6, 48 */
2372 0x87, 7, /* DW_CFA_offset, %r7, 56 */
2373 0x88, 8, /* DW_CFA_offset, %r8, 64 */
 2374 0x89, 9, /* DW_CFA_offset, %r9, 72 */
2375 0x8a, 10, /* DW_CFA_offset, %r10, 80 */
2376 0x8b, 11, /* DW_CFA_offset, %r11, 88 */
2377 0x8c, 12, /* DW_CFA_offset, %r12, 96 */
2378 0x8d, 13, /* DW_CFA_offset, %r13, 104 */
2379 0x8e, 14, /* DW_CFA_offset, %r14, 112 */
2380 }
2381};
2382
2383void tcg_register_jit(void *buf, size_t buf_size)
2384{
2385 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2386}