]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Tiny Code Generator for QEMU | |
3 | * | |
4 | * Copyright (c) 2009 Ulrich Hecht <uli@suse.de> | |
5 | * Copyright (c) 2009 Alexander Graf <agraf@suse.de> | |
6 | * Copyright (c) 2010 Richard Henderson <rth@twiddle.net> | |
7 | * | |
8 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
9 | * of this software and associated documentation files (the "Software"), to deal | |
10 | * in the Software without restriction, including without limitation the rights | |
11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
12 | * copies of the Software, and to permit persons to whom the Software is | |
13 | * furnished to do so, subject to the following conditions: | |
14 | * | |
15 | * The above copyright notice and this permission notice shall be included in | |
16 | * all copies or substantial portions of the Software. | |
17 | * | |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
21 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
24 | * THE SOFTWARE. | |
25 | */ | |
26 | ||
27 | #include "tcg-be-ldst.h" | |
28 | ||
29 | /* We only support generating code for 64-bit mode. */ | |
30 | #if TCG_TARGET_REG_BITS != 64 | |
31 | #error "unsupported code generation mode" | |
32 | #endif | |
33 | ||
34 | #include "elf.h" | |
35 | ||
36 | /* ??? The translation blocks produced by TCG are generally small enough to | |
37 | be entirely reachable with a 16-bit displacement. Leaving the option for | |
38 | a 32-bit displacement here Just In Case. */ | |
39 | #define USE_LONG_BRANCHES 0 | |
40 | ||
41 | #define TCG_CT_CONST_MULI 0x100 | |
42 | #define TCG_CT_CONST_ORI 0x200 | |
43 | #define TCG_CT_CONST_XORI 0x400 | |
44 | #define TCG_CT_CONST_CMPI 0x800 | |
45 | #define TCG_CT_CONST_ADLI 0x1000 | |
46 | ||
47 | /* Several places within the instruction set 0 means "no register" | |
48 | rather than TCG_REG_R0. */ | |
49 | #define TCG_REG_NONE 0 | |
50 | ||
/* A scratch register that may be used throughout the backend. */
52 | #define TCG_TMP0 TCG_REG_R14 | |
53 | ||
54 | #ifndef CONFIG_SOFTMMU | |
55 | #define TCG_GUEST_BASE_REG TCG_REG_R13 | |
56 | #endif | |
57 | ||
58 | /* All of the following instructions are prefixed with their instruction | |
59 | format, and are defined as 8- or 16-bit quantities, even when the two | |
60 | halves of the 16-bit quantity may appear 32 bits apart in the insn. | |
61 | This makes it easy to copy the values from the tables in Appendix B. */ | |
typedef enum S390Opcode {
    /* RIL format: 6-byte insns, one register plus a 32-bit immediate
       (or 32-bit halfword-scaled relative offset for branches/LARL). */
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    /* RI format: 4-byte insns, one register plus a 16-bit immediate.
       The 12-bit opcode is split around the register nibble. */
    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,

    /* RIE format: 6-byte compare-and-branch / rotate-insert insns. */
    RIE_CGIJ    = 0xec7c,
    RIE_CGRJ    = 0xec64,
    RIE_CIJ     = 0xec7e,
    RIE_CLGRJ   = 0xec65,
    RIE_CLIJ    = 0xec7f,
    RIE_CLGIJ   = 0xec7d,
    RIE_CLRJ    = 0xec77,
    RIE_CRJ     = 0xec76,
    RIE_RISBG   = 0xec55,

    /* RRE format: 4-byte register-register insns with 16-bit opcode. */
    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    /* RRF format: like RRE but with an extra m3/r3 field (load-on-cond). */
    RRF_LOCR    = 0xb9f2,
    RRF_LOCGR   = 0xb9e2,

    /* RR format: classic 2-byte register-register insns. */
    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    /* RSY format: 6-byte shifts/rotates with 20-bit displacement. */
    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SRAG    = 0xeb0a,
    RSY_SRLG    = 0xeb0c,

    /* RS format: 4-byte 32-bit shifts with 12-bit displacement. */
    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    /* RXY format: 6-byte base+index+20-bit-displacement memory ops. */
    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,

    /* RX format: 4-byte base+index+12-bit-displacement memory ops. */
    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,

    /* BCR 0,0 -- a 2-byte no-op, used for padding. */
    NOP         = 0x0707,
} S390Opcode;
225 | ||
226 | #ifdef CONFIG_DEBUG_TCG | |
227 | static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { | |
228 | "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", | |
229 | "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15" | |
230 | }; | |
231 | #endif | |
232 | ||
233 | /* Since R6 is a potential argument register, choose it last of the | |
234 | call-saved registers. Likewise prefer the call-clobbered registers | |
235 | in reverse order to maximize the chance of avoiding the arguments. */ | |
/* Register allocation preference order; earlier entries are tried first. */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers, most-preferred first; R6 last since it can
       also carry the 5th integer argument. */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers. */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation. */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};
256 | ||
/* Integer argument registers for the s390x C calling convention: r2-r6. */
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};
264 | ||
/* Integer return-value register: r2. */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};
268 | ||
269 | #define S390_CC_EQ 8 | |
270 | #define S390_CC_LT 4 | |
271 | #define S390_CC_GT 2 | |
272 | #define S390_CC_OV 1 | |
273 | #define S390_CC_NE (S390_CC_LT | S390_CC_GT) | |
274 | #define S390_CC_LE (S390_CC_LT | S390_CC_EQ) | |
275 | #define S390_CC_GE (S390_CC_GT | S390_CC_EQ) | |
276 | #define S390_CC_NEVER 0 | |
277 | #define S390_CC_ALWAYS 15 | |
278 | ||
279 | /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */ | |
/* Condition codes that result from a COMPARE and COMPARE LOGICAL.
   Signed and unsigned TCG conditions map to the same CC masks; the
   signedness is chosen by emitting COMPARE vs COMPARE LOGICAL. */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};
292 | ||
293 | /* Condition codes that result from a LOAD AND TEST. Here, we have no | |
294 | unsigned instruction variation, however since the test is vs zero we | |
295 | can re-map the outcomes appropriately. */ | |
/* Condition codes that result from a LOAD AND TEST.  Here, we have no
   unsigned instruction variation, however since the test is vs zero we
   can re-map the outcomes appropriately: e.g. "unsigned < 0" is never
   true, and "unsigned >= 0" is always true. */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};
308 | ||
309 | #ifdef CONFIG_SOFTMMU | |
/* Softmmu slow-path load helpers, indexed by MemOp (size|sign|endian).
   Entries not listed are NULL and must never be reached. */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};
324 | ||
/* Softmmu slow-path store helpers, indexed by MemOp.  Stores need no
   sign-extended variants, so fewer entries than qemu_ld_helpers. */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
334 | #endif | |
335 | ||
336 | static tcg_insn_unit *tb_ret_addr; | |
337 | ||
338 | /* A list of relevant facilities used by this translator. Some of these | |
339 | are required for proper operation, and these are checked at startup. */ | |
340 | ||
341 | #define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2)) | |
342 | #define FACILITY_LONG_DISP (1ULL << (63 - 18)) | |
343 | #define FACILITY_EXT_IMM (1ULL << (63 - 21)) | |
344 | #define FACILITY_GEN_INST_EXT (1ULL << (63 - 34)) | |
345 | #define FACILITY_LOAD_ON_COND (1ULL << (63 - 45)) | |
346 | #define FACILITY_FAST_BCR_SER FACILITY_LOAD_ON_COND | |
347 | ||
348 | static uint64_t facilities; | |
349 | ||
/* Apply a pc-relative relocation at CODE_PTR so that it targets VALUE.
   s390 relative displacements are counted in 2-byte halfwords.  The
   displacement field being patched sits one halfword past the start of
   the insn, hence the fixed ADDEND of -2 bytes and the (code_ptr - 1)
   base used for the distance computation below. */
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    /* Distance from the insn start to VALUE, in halfwords
       (tcg_insn_unit is 2 bytes, so pointer subtraction scales). */
    intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1);
    tcg_debug_assert(addend == -2);

    switch (type) {
    case R_390_PC16DBL:
        /* 16-bit halfword displacement, e.g. BRC and compare-and-branch. */
        tcg_debug_assert(pcrel2 == (int16_t)pcrel2);
        tcg_patch16(code_ptr, pcrel2);
        break;
    case R_390_PC32DBL:
        /* 32-bit halfword displacement, e.g. BRASL, BRCL, LARL. */
        tcg_debug_assert(pcrel2 == (int32_t)pcrel2);
        tcg_patch32(code_ptr, pcrel2);
        break;
    default:
        tcg_abort();
        break;
    }
}
370 | ||
371 | /* parse target specific constraints */ | |
/* parse target specific constraints */
/* Translate one constraint letter from a TCG op definition into CT flags
   and/or an allowed-register set.  Advances *PCT_STR past the letter.
   Returns 0 on success, -1 for an unrecognized letter. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':                  /* all registers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'R':                  /* not R0 */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;
    case 'L':                  /* qemu_ld/st constraint */
        /* Exclude R2-R4, which the softmmu slow path needs for the
           helper call arguments. */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
        break;
    case 'a':                  /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b':                  /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'A':                  /* immediate usable by add2/sub2 */
        ct->ct |= TCG_CT_CONST_ADLI;
        break;
    case 'K':                  /* immediate usable by multiply */
        ct->ct |= TCG_CT_CONST_MULI;
        break;
    case 'O':                  /* immediate usable by OR */
        ct->ct |= TCG_CT_CONST_ORI;
        break;
    case 'X':                  /* immediate usable by XOR */
        ct->ct |= TCG_CT_CONST_XORI;
        break;
    case 'C':                  /* immediate usable by compare */
        ct->ct |= TCG_CT_CONST_CMPI;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
426 | ||
427 | /* Immediates to be used with logical OR. This is an optimization only, | |
428 | since a full 64-bit immediate OR can always be performed with 4 sequential | |
429 | OI[LH][LH] instructions. What we're looking for is immediates that we | |
430 | can load efficiently, and the immediate load plus the reg-reg OR is | |
431 | smaller than the sequential OI's. */ | |
432 | ||
433 | static int tcg_match_ori(TCGType type, tcg_target_long val) | |
434 | { | |
435 | if (facilities & FACILITY_EXT_IMM) { | |
436 | if (type == TCG_TYPE_I32) { | |
437 | /* All 32-bit ORs can be performed with 1 48-bit insn. */ | |
438 | return 1; | |
439 | } | |
440 | } | |
441 | ||
442 | /* Look for negative values. These are best to load with LGHI. */ | |
443 | if (val < 0) { | |
444 | if (val == (int16_t)val) { | |
445 | return 0; | |
446 | } | |
447 | if (facilities & FACILITY_EXT_IMM) { | |
448 | if (val == (int32_t)val) { | |
449 | return 0; | |
450 | } | |
451 | } | |
452 | } | |
453 | ||
454 | return 1; | |
455 | } | |
456 | ||
457 | /* Immediates to be used with logical XOR. This is almost, but not quite, | |
458 | only an optimization. XOR with immediate is only supported with the | |
459 | extended-immediate facility. That said, there are a few patterns for | |
460 | which it is better to load the value into a register first. */ | |
461 | ||
462 | static int tcg_match_xori(TCGType type, tcg_target_long val) | |
463 | { | |
464 | if ((facilities & FACILITY_EXT_IMM) == 0) { | |
465 | return 0; | |
466 | } | |
467 | ||
468 | if (type == TCG_TYPE_I32) { | |
469 | /* All 32-bit XORs can be performed with 1 48-bit insn. */ | |
470 | return 1; | |
471 | } | |
472 | ||
473 | /* Look for negative values. These are best to load with LGHI. */ | |
474 | if (val < 0 && val == (int32_t)val) { | |
475 | return 0; | |
476 | } | |
477 | ||
478 | return 1; | |
479 | } | |
480 | ||
/* Immediates to be used with comparisons. */
482 | ||
/* Return nonzero if VAL may be used as an immediate in a comparison
   against a value of TYPE, given the facilities available. */
static int tcg_match_cmpi(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* The COMPARE IMMEDIATE instruction is available. */
        if (type == TCG_TYPE_I32) {
            /* We have a 32-bit immediate and can compare against anything. */
            return 1;
        } else {
            /* ??? We have no insight here into whether the comparison is
               signed or unsigned.  The COMPARE IMMEDIATE insn uses a 32-bit
               signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
               a 32-bit unsigned immediate.  If we were to use the (semi)
               obvious "val == (int32_t)val" we would be enabling unsigned
               comparisons vs very large numbers.  The only solution is to
               take the intersection of the ranges.  */
            /* ??? Another possible solution is to simply lie and allow all
               constants here and force the out-of-range values into a temp
               register in tgen_cmp when we have knowledge of the actual
               comparison code in use.  */
            return val >= 0 && val <= 0x7fffffff;
        }
    } else {
        /* Only the LOAD AND TEST instruction is available,
           so only a comparison against zero can be done immediately. */
        return val == 0;
    }
}
509 | ||
510 | /* Immediates to be used with add2/sub2. */ | |
511 | ||
512 | static int tcg_match_add2i(TCGType type, tcg_target_long val) | |
513 | { | |
514 | if (facilities & FACILITY_EXT_IMM) { | |
515 | if (type == TCG_TYPE_I32) { | |
516 | return 1; | |
517 | } else if (val >= -0xffffffffll && val <= 0xffffffffll) { | |
518 | return 1; | |
519 | } | |
520 | } | |
521 | return 0; | |
522 | } | |
523 | ||
524 | /* Test if a constant matches the constraint. */ | |
/* Test if a constant matches the constraint. */
/* Returns nonzero when VAL of TYPE satisfies ARG_CT; 32-bit values are
   first sign-extended so the range tests below work uniformly. */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        /* Any constant is acceptable. */
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_MULI) {
        /* Immediates that may be used with multiply.  If we have the
           general-instruction-extensions, then we have MULTIPLY SINGLE
           IMMEDIATE with a signed 32-bit, otherwise we have only
           MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit.  */
        if (facilities & FACILITY_GEN_INST_EXT) {
            return val == (int32_t)val;
        } else {
            return val == (int16_t)val;
        }
    } else if (ct & TCG_CT_CONST_ADLI) {
        return tcg_match_add2i(type, val);
    } else if (ct & TCG_CT_CONST_ORI) {
        return tcg_match_ori(type, val);
    } else if (ct & TCG_CT_CONST_XORI) {
        return tcg_match_xori(type, val);
    } else if (ct & TCG_CT_CONST_CMPI) {
        return tcg_match_cmpi(type, val);
    }

    return 0;
}
561 | ||
562 | /* Emit instructions according to the given instruction format. */ | |
563 | ||
/* RR format, 2 bytes: op(8) r1(4) r2(4). */
static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}
568 | ||
/* RRE format, 4 bytes: op(16) unused(8) r1(4) r2(4). */
static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}
574 | ||
/* RRF format, 4 bytes: op(16) m3(4) unused(4) r1(4) r2(4).
   Used here for the load-on-condition insns, m3 being the CC mask. */
static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}
580 | ||
/* RI format, 4 bytes: op-high(8) r1(4) op-low(4) i2(16).
   OP holds the split 12-bit opcode (e.g. 0xa70b); shifting it left 16
   leaves the r1 nibble zero, so ORing (r1 << 20) drops r1 between the
   two opcode parts. */
static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}
585 | ||
/* RIL format, 6 bytes: op-high(8) r1(4) op-low(4) i2(32).
   As with RI, OP (e.g. 0xc209) has a zero nibble reserved for r1. */
static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}
591 | ||
/* RS format, 4 bytes: op(8) r1(4) r3(4) b2(4) d2(12).
   Also reused for RX via the macro below, with r3 acting as the index. */
static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}
598 | ||
/* RSY format, 6 bytes: op-high(8) r1(4) r3(4) b2(4) dl(12) dh(8) op-low(8).
   The 20-bit signed displacement is split into low (dl) and high (dh)
   parts, and the 16-bit opcode is split around the operands.  Also
   reused for RXY via the macro below. */
static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}
606 | ||
/* RX/RXY share the bit layout of RS/RSY, with the index register in the
   r3 slot, so reuse those emitters directly. */
#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

/* Emit an opcode with "type-checking" of the format. */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
613 | ||
614 | ||
615 | /* emit 64-bit shifts */ | |
/* emit 64-bit shifts */
/* DEST = SRC shifted by (SH_REG + SH_IMM); pass TCG_REG_NONE for a pure
   immediate shift.  Note the RSY operand mapping: r1=dest, b2=sh_reg,
   r3=src, disp=sh_imm. */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}
621 | ||
622 | /* emit 32-bit shifts */ | |
/* emit 32-bit shifts */
/* 32-bit shifts operate on DEST in place; shift count is SH_REG + SH_IMM
   (SH_REG may be TCG_REG_NONE). */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}
628 | ||
/* Register-to-register move; a same-register move is a no-op. */
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src != dst) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, LR, dst, src);
        } else {
            tcg_out_insn(s, RRE, LGR, dst, src);
        }
    }
}
639 | ||
640 | /* load a register with an immediate value */ | |
/* load a register with an immediate value */
/* Emit the cheapest available sequence to load SVAL into RET: one
   4-byte insn, one 6-byte insn, a PC-relative LARL, and finally a
   multi-insn build-up of low and high halves. */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    /* Load-logical-immediate insns, one per 16-bit group of the value. */
    static const S390Opcode lli_insns[4] = {
        RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
    };

    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        /* Normalize: zero-extended and sign-extended views of the value. */
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    /* A value with exactly one non-zero 16-bit group: one LLI insn. */
    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            /* ">> 31 >> 1" instead of ">> 32": avoids an out-of-range
               shift should the operand type ever be only 32 bits. */
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
            return;
        }
    }

    /* Try for PC-relative address load.  Only even addresses can be
       reached, since LARL offsets are in halfwords. */
    if ((sval & 1) == 0) {
        ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    }

    /* If extended immediates are not present, then we may have to issue
       several instructions to load the low 32 bits.  */
    if (!(facilities & FACILITY_EXT_IMM)) {
        /* A 32-bit unsigned value can be loaded in 2 insns.  And given
           that the lli_insns loop above did not succeed, we know that
           both insns are required.  */
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RI, LLILL, ret, uval);
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }

        /* If all high bits are set, the value can be loaded in 2 or 3 insns.
           We first want to make sure that all the high bits get set.  With
           luck the low 16-bits can be considered negative to perform that for
           free, otherwise we load an explicit -1.  */
        if (sval >> 31 >> 1 == -1) {
            if (uval & 0x8000) {
                tcg_out_insn(s, RI, LGHI, ret, uval);
            } else {
                tcg_out_insn(s, RI, LGHI, ret, -1);
                tcg_out_insn(s, RI, IILL, ret, uval);
            }
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }
    }

    /* If we get here, both the high and low parts have non-zero bits.  */

    /* Recurse to load the lower 32-bits.  */
    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);

    /* Insert data into the high 32-bits.  */
    uval = uval >> 31 >> 1;
    if (facilities & FACILITY_EXT_IMM) {
        if (uval < 0x10000) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        } else if ((uval & 0xffff) == 0) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        } else {
            tcg_out_insn(s, RIL, IIHF, ret, uval);
        }
    } else {
        /* Without IIHF, insert each non-zero 16-bit group separately. */
        if (uval & 0xffff) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        }
        if (uval & 0xffff0000) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        }
    }
}
747 | ||
748 | ||
749 | /* Emit a load/store type instruction. Inputs are: | |
750 | DATA: The register to be loaded or stored. | |
751 | BASE+OFS: The effective address. | |
752 | OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0. | |
753 | OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */ | |
754 | ||
static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    /* An offset outside the signed 20-bit RXY range must be split:
       the in-range remainder stays in the insn, the rest goes through
       an immediate load into TCG_TMP0 used as (or added to) the index. */
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    /* Prefer the shorter RX form when it exists and the offset fits
       its unsigned 12-bit displacement. */
    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
779 | ||
780 | ||
781 | /* load data without address translation or endianness conversion */ | |
/* load data without address translation or endianness conversion */
/* 32-bit loads may use RX-format L; 64-bit LG exists only in RXY form. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
    }
}
791 | ||
/* Store DATA to BASE+OFS; the mirror image of tcg_out_ld. */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
    }
}
801 | ||
/* Store-immediate hook: always declines (returns false), so the common
   code materializes the constant in a register and uses tcg_out_st. */
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}
807 | ||
808 | /* load data from an absolute host address */ | |
/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    intptr_t addr = (intptr_t)abs;

    /* Prefer a single pc-relative load (LRL/LGRL) when the address is
       even (offsets are halfword-scaled) and within +/-4GB of here. */
    if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
        ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }

    /* Otherwise load the high bits of the address into DEST and fold
       the low 16 bits into the load's displacement. */
    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}
828 | ||
/* Emit RISBG (rotate then insert selected bits): rotate SRC left by OFS,
   insert bit range [MSB..LSB] into DEST; Z set means zero the rest. */
static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}
837 | ||
/* Sign-extend the low 8 bits of SRC into DEST.  Uses LGBR when the
   extended-immediate facility provides it; otherwise falls back to a
   shift-left/shift-right-arithmetic pair. */
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        /* In-place 32-bit shifts vs a 64-bit copy-shift when moving. */
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}
857 | ||
/* Zero-extend the low 8 bits of SRC into DEST.  Uses LLGCR when
   available; otherwise ANDs with a 0xff mask, staged through TCG_TMP0
   or DEST depending on whether SRC and DEST alias. */
static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    if (dest == src) {
        /* Mask must live elsewhere; AND it into dest(==src). */
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        /* Load the mask into dest, then AND src in. */
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}
877 | ||
/* Sign-extend the low 16 bits of SRC into DEST; LGHR when available,
   else a shift pair (cf. tgen_ext8s). */
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}
897 | ||
/* Zero-extend the low 16 bits of SRC into DEST; LLGHR when available,
   else an AND with 0xffff (cf. tgen_ext8u). */
static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}
917 | ||
/* Sign-extend the low 32 bits of SRC into DEST (LGFR). */
static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}
922 | ||
/* Zero-extend the low 32 bits of SRC into DEST (LLGFR). */
static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}
927 | ||
/* Test whether C is a single contiguous run of one bits, where the run
   is also allowed to wrap around from bit 63 to bit 0.  Accepted shapes:
       0....01....1
       1....10....0
       1..10..01..1
       0..01..10..0
   These are exactly the masks RISBG can realize.  The algorithm follows
   the version in the gcc s390 backend. */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t t;

    /* Inverting does not change the number of 0<->1 transitions, so
       canonicalize to a value whose low bit is clear. */
    if (c & 1) {
        c = ~c;
    }
    /* All zeros (or, before the inversion above, all ones) is rejected. */
    if (c == 0) {
        return false;
    }
    /* Isolate the lowest set bit: the first transition. */
    t = c & -c;
    /* Complement, and erase everything below that first transition. */
    c = ~c & -t;
    /* Isolate the second transition, if there is one. */
    t = c & -c;
    /* Accept iff what remains is a single run reaching bit 63, or
       nothing remains at all (no second transition). */
    return c == -t;
}
957 | ||
/* Emit RISBG to perform OUT = IN & VAL, where VAL must satisfy
   risbg_mask() above.  The [msb,lsb] pair selects the run of one bits;
   the trailing operands (rotate 0, z-bit 1) presumably zero all bits
   outside the selected range -- see tcg_out_risbg. */
static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Both bit 63 and bit 0 are set: the run of ones wraps around.
           Achieve wraparound by swapping msb and lsb, deriving them from
           the (contiguous) run of zeros in ~val instead. */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        /* Non-wrapping: msb/lsb bracket the run of ones directly. */
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}
971 | ||
/* Emit DEST = DEST & VAL, choosing the cheapest available encoding:
   dedicated zero-extensions, a single AND-immediate on one 16- or
   32-bit field, RISBG for contiguous (possibly wrapped) masks, and
   finally a full constant load plus register AND.  For TCG_TYPE_I32,
   bits above 32 are don't-cares (masked via VALID below). */
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    /* Bits of VAL that actually matter for this operation size. */
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions. */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }
    if (facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.  Each NI*
       opcode ANDs one 16-bit field; usable when every other field of
       the mask (ignoring don't-care bits) is all-ones. */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  NILF/NIHF
       AND one 32-bit half, with the same all-ones requirement on the
       other half. */
    if (facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }
    /* A contiguous (or wrapped) run of ones can be done with RISBG. */
    if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    /* Fall back to loading the constant. */
    tcg_out_movi(s, type, TCG_TMP0, val);
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}
1031 | ||
1032 | static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val) | |
1033 | { | |
1034 | static const S390Opcode oi_insns[4] = { | |
1035 | RI_OILL, RI_OILH, RI_OIHL, RI_OIHH | |
1036 | }; | |
1037 | static const S390Opcode nif_insns[2] = { | |
1038 | RIL_OILF, RIL_OIHF | |
1039 | }; | |
1040 | ||
1041 | int i; | |
1042 | ||
1043 | /* Look for no-op. */ | |
1044 | if (val == 0) { | |
1045 | return; | |
1046 | } | |
1047 | ||
1048 | if (facilities & FACILITY_EXT_IMM) { | |
1049 | /* Try all 32-bit insns that can perform it in one go. */ | |
1050 | for (i = 0; i < 4; i++) { | |
1051 | tcg_target_ulong mask = (0xffffull << i*16); | |
1052 | if ((val & mask) != 0 && (val & ~mask) == 0) { | |
1053 | tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16); | |
1054 | return; | |
1055 | } | |
1056 | } | |
1057 | ||
1058 | /* Try all 48-bit insns that can perform it in one go. */ | |
1059 | for (i = 0; i < 2; i++) { | |
1060 | tcg_target_ulong mask = (0xffffffffull << i*32); | |
1061 | if ((val & mask) != 0 && (val & ~mask) == 0) { | |
1062 | tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32); | |
1063 | return; | |
1064 | } | |
1065 | } | |
1066 | ||
1067 | /* Perform the OR via sequential modifications to the high and | |
1068 | low parts. Do this via recursion to handle 16-bit vs 32-bit | |
1069 | masks in each half. */ | |
1070 | tgen64_ori(s, dest, val & 0x00000000ffffffffull); | |
1071 | tgen64_ori(s, dest, val & 0xffffffff00000000ull); | |
1072 | } else { | |
1073 | /* With no extended-immediate facility, we don't need to be so | |
1074 | clever. Just iterate over the insns and mask in the constant. */ | |
1075 | for (i = 0; i < 4; i++) { | |
1076 | tcg_target_ulong mask = (0xffffull << i*16); | |
1077 | if ((val & mask) != 0) { | |
1078 | tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16); | |
1079 | } | |
1080 | } | |
1081 | } | |
1082 | } | |
1083 | ||
1084 | static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val) | |
1085 | { | |
1086 | /* Perform the xor by parts. */ | |
1087 | if (val & 0xffffffff) { | |
1088 | tcg_out_insn(s, RIL, XILF, dest, val); | |
1089 | } | |
1090 | if (val > 0xffffffff) { | |
1091 | tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1); | |
1092 | } | |
1093 | } | |
1094 | ||
/* Emit a comparison of R1 against C2 (an immediate if C2CONST, else a
   register), signed or unsigned per the condition C, sized per TYPE.
   Returns the condition-code mask to branch/load on, taken from
   tcg_cond_to_s390_cond -- or from tcg_cond_to_ltr_cond when the
   cheaper LOAD AND TEST against zero was used. */
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, int c2const)
{
    bool is_unsigned = is_unsigned_cond(c);
    if (c2const) {
        if (c2 == 0) {
            /* Compare against zero via LOAD AND TEST. */
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LTR, r1, r1);
            } else {
                tcg_out_insn(s, RRE, LTGR, r1, r1);
            }
            return tcg_cond_to_ltr_cond[c];
        } else {
            /* Compare-immediate, logical or signed flavor. */
            if (is_unsigned) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
                }
            } else {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
                }
            }
        }
    } else {
        /* Register-register compare, logical or signed flavor. */
        if (is_unsigned) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CLR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CLGR, r1, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CGR, r1, c2);
            }
        }
    }
    return tcg_cond_to_s390_cond[c];
}
1139 | ||
/* Emit DEST = (C1 <cond> C2) ? 1 : 0.  Several conditions are reduced
   to carry-based sequences (compare + ADD LOGICAL WITH CARRY, or
   SUBTRACT LOGICAL + ADD LOGICAL WITH CARRY); the rest fall through to
   a generic compare followed by LOAD ON CONDITION or a branch-over. */
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc;

    switch (cond) {
    case TCG_COND_GTU:
    case TCG_COND_GT:
    do_greater:
        /* The result of a compare has CC=2 for GT and CC=3 unused.
           ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */
        tgen_cmp(s, type, cond, c1, c2, c2const);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_GEU:
    do_geu:
        /* We need "real" carry semantics, so use SUBTRACT LOGICAL
           instead of COMPARE LOGICAL.  This needs an extra move. */
        tcg_out_mov(s, type, TCG_TMP0, c1);
        if (c2const) {
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, SLR, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2);
            }
            /* DEST is zeroed only after the subtract here, so a
               DEST == C2 overlap cannot clobber the operand. */
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        }
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_LEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
        /* Swap operands so that we can use GEU/GTU/GT. */
        if (c2const) {
            /* Materialize the constant so it can act as the first
               operand of the swapped compare. */
            tcg_out_movi(s, type, TCG_TMP0, c2);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
        } else {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
        }
        if (cond == TCG_COND_LEU) {
            goto do_geu;
        }
        cond = tcg_swap_cond(cond);
        goto do_greater;

    case TCG_COND_NE:
        /* X != 0 is X > 0. */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
            goto do_greater;
        }
        break;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0 is 0 >= X. */
        if (c2const && c2 == 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
            goto do_geu;
        }
        break;

    default:
        break;
    }

    cc = tgen_cmp(s, type, cond, c1, c2, c2const);
    if (facilities & FACILITY_LOAD_ON_COND) {
        /* Emit: d = 0, t = 1, d = (cc ? t : d). */
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
        tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
    } else {
        /* Emit: d = 1; if (cc) goto over; d = 0; over:
           The branch offset is in halfwords: (4 + 4) >> 1 skips the
           4-byte BRC itself plus the 4-byte load of zero. */
        tcg_out_movi(s, type, dest, 1);
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_movi(s, type, dest, 0);
    }
}
1235 | ||
/* Emit DEST = (C1 <c> C2) ? R3 : DEST.  With the load-on-condition
   facility this is compare + LOCGR; otherwise invert the condition and
   branch over an unconditional register move. */
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
{
    int cc;
    if (facilities & FACILITY_LOAD_ON_COND) {
        cc = tgen_cmp(s, type, c, c1, c2, c2const);
        tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
    } else {
        /* Branch on the inverted condition around the move. */
        c = tcg_invert_cond(c);
        cc = tgen_cmp(s, type, c, c1, c2, c2const);

        /* Emit: if (cc) goto over; dest = r3; over:
           Offset in halfwords: skip the 4-byte BRC plus the 4-byte LGR. */
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_insn(s, RRE, LGR, dest, r3);
    }
}
1252 | ||
/* Report whether the deposit opcode is supported: any (ofs, len) is
   fine as long as RISBG (general-instructions-extension) is present. */
bool tcg_target_deposit_valid(int ofs, int len)
{
    return (facilities & FACILITY_GEN_INST_EXT) != 0;
}
1257 | ||
/* Emit a deposit of LEN bits from SRC into DEST at bit offset OFS,
   via RISBG.  The (ofs, len) pair is converted to the instruction's
   big-endian-numbered [msb, lsb] bit range, with SRC rotated left by
   OFS; the final 0 presumably preserves the remaining DEST bits
   (contrast the 1 used in tgen_andi_risbg). */
static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
}
1265 | ||
/* Emit a (possibly conditional, per CC) jump to the absolute address
   DEST, using the shortest reachable form: 16-bit-relative BRC,
   32-bit-relative BRCL, or an indirect BCR through TCG_TMP0.  The
   displacement is measured in tcg_insn_unit (halfword) steps. */
static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int16_t)off) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        /* Out of relative range: load the target and branch indirect. */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}
1278 | ||
/* Emit a (conditional, per CC) branch to TCG label L.  A resolved
   label branches directly; an unresolved one emits the opcode halfword
   and records a relocation for the displacement field, reserving one
   halfword (BRC) or two (BRCL, if USE_LONG_BRANCHES). */
static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else if (USE_LONG_BRANCHES) {
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, -2);
        s->code_ptr += 2;
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, -2);
        s->code_ptr += 1;
    }
}
1293 | ||
/* Emit a fused compare-register-and-branch (RIE format: CRJ and
   friends) of R1 against R2, branching to L when the comparison
   satisfies mask CC.  The three halfwords are emitted by hand since
   the 16-bit displacement sits in the middle of the instruction. */
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    intptr_t off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation. */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}
1311 | ||
/* Emit a fused compare-immediate-and-branch (RIE format: CIJ and
   friends) of R1 against the 8-bit immediate I2, branching to L when
   the comparison satisfies mask CC.  As above, emitted halfword by
   halfword because the displacement is in the middle. */
static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_target_long off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation. */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}
1329 | ||
/* Emit a conditional branch to L on (R1 <c> C2).  Prefer the fused
   compare-and-branch instructions when the general-instructions-
   extension facility is present and the operands fit; otherwise fall
   back to a separate compare followed by a plain branch. */
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;

    if (facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            /* Register-register: always fusable. */
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, l);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE. */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
            return;
        }
    }

    /* Fallback: separate compare, then conditional branch. */
    cc = tgen_cmp(s, type, c, r1, c2, c2const);
    tgen_branch(s, cc, l);
}
1380 | ||
/* Emit a call to DEST with the return address in R14: BRASL when the
   halfword-scaled displacement fits 32 bits, otherwise load the target
   into TCG_TMP0 and BASR. */
static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}
1391 | ||
/* Emit a guest-memory load described by OPC from BASE+INDEX+DISP into
   DATA.  Byte-swapped (MO_BSWAP) variants use the reversed-load LRV*
   instructions; since those do not extend, a separate zero/sign
   extension follows for sub-64-bit sizes. */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case MO_SB:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;

    case MO_UW | MO_BSWAP:
        /* swapped unsigned halfword load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16u(s, TCG_TYPE_I64, data, data);
        break;
    case MO_UW:
        tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        break;

    case MO_SW | MO_BSWAP:
        /* swapped sign-extended halfword load */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16s(s, TCG_TYPE_I64, data, data);
        break;
    case MO_SW:
        tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        break;

    case MO_UL | MO_BSWAP:
        /* swapped unsigned int load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32u(s, data, data);
        break;
    case MO_UL:
        tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        break;

    case MO_SL | MO_BSWAP:
        /* swapped sign-extended int load */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32s(s, data, data);
        break;
    case MO_SL:
        tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, LG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}
1450 | ||
/* Emit a guest-memory store described by OPC of DATA to
   BASE+INDEX+DISP.  Byte-swapped variants use the reversed-store STRV*
   instructions; for the normal forms, the short RX encoding is used
   when the displacement fits its unsigned 12-bit field, else RXY. */
static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_UB:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;

    case MO_UW | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        break;
    case MO_UW:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;

    case MO_UL | MO_BSWAP:
        tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        break;
    case MO_UL:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, STG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}
1496 | ||
1497 | #if defined(CONFIG_SOFTMMU) | |
1498 | /* We're expecting to use a 20-bit signed offset on the tlb memory ops. | |
1499 | Using the offset of the second entry in the last tlb table ensures | |
1500 | that we can index all of the elements of the first entry. */ | |
1501 | QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1]) | |
1502 | > 0x7ffff); | |
1503 | ||
/* Load and compare a TLB entry, leaving the flags set.  Loads the TLB
   addend into R2.  Returns a register with the sanitized guest address.  */
/* Emit the softmmu TLB lookup for a guest access at ADDR_REG, sized
   and aligned per OPC, in MMU mode MEM_INDEX, for a load (IS_LD) or
   store.  On return, R2 holds the TLB entry's scaled offset and then
   its addend, the condition code reflects the tag comparison, and the
   returned register holds the (possibly zero-extended) guest address.
   Clobbers R2, R3 and possibly TCG_TMP0. */
static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                               int mem_index, bool is_ld)
{
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    unsigned s_mask = (1 << s_bits) - 1;
    unsigned a_mask = (1 << a_bits) - 1;
    int ofs, a_off;
    uint64_t tlb_mask;

    /* For aligned accesses, we check the first byte and include the alignment
       bits within the address.  For unaligned access, we check that we don't
       cross pages using the address of the last byte of the access. */
    a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
    tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;

    if (facilities & FACILITY_GEN_INST_EXT) {
        /* Extract the TLB index bits from the address, pre-scaled by the
           TLB entry size, into R2 in a single RISBG. */
        tcg_out_risbg(s, TCG_REG_R2, addr_reg,
                      64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
                      63 - CPU_TLB_ENTRY_BITS,
                      64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
        if (a_off) {
            /* Bias the address by the last-byte offset before masking. */
            tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
            tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
        } else {
            /* tlb_mask is page-mask | alignment: a contiguous run, so
               RISBG can mask and move in one instruction. */
            tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
        }
    } else {
        /* Without RISBG: shift, bias, then mask index and tag separately. */
        tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
        tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
                  (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
    }

    /* Compare the masked address in R3 against the TLB tag. */
    if (is_ld) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    }
    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    }

    /* Load the host-address addend, replacing the index in R2. */
    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);

    if (TARGET_LONG_BITS == 32) {
        /* Zero-extend a 32-bit guest address for use as a host offset. */
        tgen_ext32u(s, TCG_REG_R3, addr_reg);
        return TCG_REG_R3;
    }
    return addr_reg;
}
1562 | ||
/* Record a slow-path entry for a qemu_ld (IS_LD) or qemu_st: the memop
   index OI, the data and address registers, the return address RADDR,
   and the location LABEL_PTR of the branch to patch. */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg data, TCGReg addr,
                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = data;
    label->addrlo_reg = addr;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}
1576 | ||
/* Emit the out-of-line slow path for a qemu_ld: patch the TLB-miss
   branch to land here, call the appropriate load helper with
   (R2=env, R3=addr, R4=oi, R5=retaddr), copy the result from R2 into
   the data register, and jump back to the fast path. */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    /* Resolve the forward branch emitted at TLB-compare time. */
    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        /* For 32-bit guests the address argument is omitted here;
           presumably R3 already holds it from the fast path. */
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}
1597 | ||
/* Emit the out-of-line slow path for a qemu_st: patch the TLB-miss
   branch here, extend the data value to the helper's expected width in
   R4, call the store helper with (R2=env, R3=addr, R4=data, R5=oi,
   R6=retaddr), and jump back to the fast path. */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    /* Resolve the forward branch emitted at TLB-compare time. */
    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    /* Zero-extend the store value to its access size for the helper. */
    switch (opc & MO_SIZE) {
    case MO_UB:
        tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UW:
        tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UL:
        tgen_ext32u(s, TCG_REG_R4, data_reg);
        break;
    case MO_Q:
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    default:
        tcg_abort();
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}
1633 | #else | |
/* User-mode (no softmmu) address preparation: zero-extend a 32-bit
   guest address, then fold in guest_base -- as an immediate
   displacement when it fits the 20-bit signed field, otherwise via the
   reserved TCG_GUEST_BASE_REG index register. */
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    if (guest_base < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = guest_base;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
1649 | #endif /* CONFIG_SOFTMMU */ | |
1650 | ||
/* Emit a complete qemu_ld of kind OI from ADDR_REG into DATA_REG.
   Softmmu: TLB lookup, a patched branch-on-miss to the slow path, and
   the direct load through the TLB addend in R2.  User mode: a direct
   load from the guest_base-adjusted address. */
static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);

    /* We need to keep the offset unchanged for retranslation.
       Emit the BRC opcode halfword; the displacement halfword is
       reserved and patched by the slow path. */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
1678 | ||
/* Emit a complete qemu_st of kind OI of DATA_REG to ADDR_REG.  Mirrors
   tcg_out_qemu_ld: softmmu TLB lookup plus patched slow-path branch,
   or a direct user-mode store. */
static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);

    /* We need to keep the offset unchanged for retranslation.
       Emit the BRC opcode halfword; the displacement halfword is
       reserved and patched by the slow path. */
    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
1706 | ||
/* Case-label helper: expands to both the _i32 and _i64 variants of
   TCG opcode x, for opcodes handled identically at both widths. */
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
1710 | ||
1711 | static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, | |
1712 | const TCGArg *args, const int *const_args) | |
1713 | { | |
1714 | S390Opcode op; | |
1715 | TCGArg a0, a1, a2; | |
1716 | ||
1717 | switch (opc) { | |
1718 | case INDEX_op_exit_tb: | |
1719 | /* return value */ | |
1720 | tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]); | |
1721 | tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr); | |
1722 | break; | |
1723 | ||
1724 | case INDEX_op_goto_tb: | |
1725 | if (s->tb_jmp_insn_offset) { | |
1726 | /* branch displacement must be aligned for atomic patching; | |
1727 | * see if we need to add extra nop before branch | |
1728 | */ | |
1729 | if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) { | |
1730 | tcg_out16(s, NOP); | |
1731 | } | |
1732 | tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4)); | |
1733 | s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s); | |
1734 | s->code_ptr += 2; | |
1735 | } else { | |
1736 | /* load address stored at s->tb_jmp_target_addr + args[0] */ | |
1737 | tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, | |
1738 | s->tb_jmp_target_addr + args[0]); | |
1739 | /* and go there */ | |
1740 | tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0); | |
1741 | } | |
1742 | s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s); | |
1743 | break; | |
1744 | ||
1745 | OP_32_64(ld8u): | |
1746 | /* ??? LLC (RXY format) is only present with the extended-immediate | |
1747 | facility, whereas LLGC is always present. */ | |
1748 | tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]); | |
1749 | break; | |
1750 | ||
1751 | OP_32_64(ld8s): | |
1752 | /* ??? LB is no smaller than LGB, so no point to using it. */ | |
1753 | tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]); | |
1754 | break; | |
1755 | ||
1756 | OP_32_64(ld16u): | |
1757 | /* ??? LLH (RXY format) is only present with the extended-immediate | |
1758 | facility, whereas LLGH is always present. */ | |
1759 | tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]); | |
1760 | break; | |
1761 | ||
1762 | case INDEX_op_ld16s_i32: | |
1763 | tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]); | |
1764 | break; | |
1765 | ||
1766 | case INDEX_op_ld_i32: | |
1767 | tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]); | |
1768 | break; | |
1769 | ||
1770 | OP_32_64(st8): | |
1771 | tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1], | |
1772 | TCG_REG_NONE, args[2]); | |
1773 | break; | |
1774 | ||
1775 | OP_32_64(st16): | |
1776 | tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1], | |
1777 | TCG_REG_NONE, args[2]); | |
1778 | break; | |
1779 | ||
1780 | case INDEX_op_st_i32: | |
1781 | tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); | |
1782 | break; | |
1783 | ||
1784 | case INDEX_op_add_i32: | |
1785 | a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; | |
1786 | if (const_args[2]) { | |
1787 | do_addi_32: | |
1788 | if (a0 == a1) { | |
1789 | if (a2 == (int16_t)a2) { | |
1790 | tcg_out_insn(s, RI, AHI, a0, a2); | |
1791 | break; | |
1792 | } | |
1793 | if (facilities & FACILITY_EXT_IMM) { | |
1794 | tcg_out_insn(s, RIL, AFI, a0, a2); | |
1795 | break; | |
1796 | } | |
1797 | } | |
1798 | tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2); | |
1799 | } else if (a0 == a1) { | |
1800 | tcg_out_insn(s, RR, AR, a0, a2); | |
1801 | } else { | |
1802 | tcg_out_insn(s, RX, LA, a0, a1, a2, 0); | |
1803 | } | |
1804 | break; | |
1805 | case INDEX_op_sub_i32: | |
1806 | a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; | |
1807 | if (const_args[2]) { | |
1808 | a2 = -a2; | |
1809 | goto do_addi_32; | |
1810 | } | |
1811 | tcg_out_insn(s, RR, SR, args[0], args[2]); | |
1812 | break; | |
1813 | ||
1814 | case INDEX_op_and_i32: | |
1815 | if (const_args[2]) { | |
1816 | tgen_andi(s, TCG_TYPE_I32, args[0], args[2]); | |
1817 | } else { | |
1818 | tcg_out_insn(s, RR, NR, args[0], args[2]); | |
1819 | } | |
1820 | break; | |
1821 | case INDEX_op_or_i32: | |
1822 | if (const_args[2]) { | |
1823 | tgen64_ori(s, args[0], args[2] & 0xffffffff); | |
1824 | } else { | |
1825 | tcg_out_insn(s, RR, OR, args[0], args[2]); | |
1826 | } | |
1827 | break; | |
1828 | case INDEX_op_xor_i32: | |
1829 | if (const_args[2]) { | |
1830 | tgen64_xori(s, args[0], args[2] & 0xffffffff); | |
1831 | } else { | |
1832 | tcg_out_insn(s, RR, XR, args[0], args[2]); | |
1833 | } | |
1834 | break; | |
1835 | ||
1836 | case INDEX_op_neg_i32: | |
1837 | tcg_out_insn(s, RR, LCR, args[0], args[1]); | |
1838 | break; | |
1839 | ||
1840 | case INDEX_op_mul_i32: | |
1841 | if (const_args[2]) { | |
1842 | if ((int32_t)args[2] == (int16_t)args[2]) { | |
1843 | tcg_out_insn(s, RI, MHI, args[0], args[2]); | |
1844 | } else { | |
1845 | tcg_out_insn(s, RIL, MSFI, args[0], args[2]); | |
1846 | } | |
1847 | } else { | |
1848 | tcg_out_insn(s, RRE, MSR, args[0], args[2]); | |
1849 | } | |
1850 | break; | |
1851 | ||
1852 | case INDEX_op_div2_i32: | |
1853 | tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]); | |
1854 | break; | |
1855 | case INDEX_op_divu2_i32: | |
1856 | tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]); | |
1857 | break; | |
1858 | ||
1859 | case INDEX_op_shl_i32: | |
1860 | op = RS_SLL; | |
1861 | do_shift32: | |
1862 | if (const_args[2]) { | |
1863 | tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]); | |
1864 | } else { | |
1865 | tcg_out_sh32(s, op, args[0], args[2], 0); | |
1866 | } | |
1867 | break; | |
1868 | case INDEX_op_shr_i32: | |
1869 | op = RS_SRL; | |
1870 | goto do_shift32; | |
1871 | case INDEX_op_sar_i32: | |
1872 | op = RS_SRA; | |
1873 | goto do_shift32; | |
1874 | ||
1875 | case INDEX_op_rotl_i32: | |
1876 | /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */ | |
1877 | if (const_args[2]) { | |
1878 | tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]); | |
1879 | } else { | |
1880 | tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0); | |
1881 | } | |
1882 | break; | |
1883 | case INDEX_op_rotr_i32: | |
1884 | if (const_args[2]) { | |
1885 | tcg_out_sh64(s, RSY_RLL, args[0], args[1], | |
1886 | TCG_REG_NONE, (32 - args[2]) & 31); | |
1887 | } else { | |
1888 | tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]); | |
1889 | tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0); | |
1890 | } | |
1891 | break; | |
1892 | ||
1893 | case INDEX_op_ext8s_i32: | |
1894 | tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]); | |
1895 | break; | |
1896 | case INDEX_op_ext16s_i32: | |
1897 | tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]); | |
1898 | break; | |
1899 | case INDEX_op_ext8u_i32: | |
1900 | tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]); | |
1901 | break; | |
1902 | case INDEX_op_ext16u_i32: | |
1903 | tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]); | |
1904 | break; | |
1905 | ||
1906 | OP_32_64(bswap16): | |
1907 | /* The TCG bswap definition requires bits 0-47 already be zero. | |
1908 | Thus we don't need the G-type insns to implement bswap16_i64. */ | |
1909 | tcg_out_insn(s, RRE, LRVR, args[0], args[1]); | |
1910 | tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16); | |
1911 | break; | |
1912 | OP_32_64(bswap32): | |
1913 | tcg_out_insn(s, RRE, LRVR, args[0], args[1]); | |
1914 | break; | |
1915 | ||
1916 | case INDEX_op_add2_i32: | |
1917 | if (const_args[4]) { | |
1918 | tcg_out_insn(s, RIL, ALFI, args[0], args[4]); | |
1919 | } else { | |
1920 | tcg_out_insn(s, RR, ALR, args[0], args[4]); | |
1921 | } | |
1922 | tcg_out_insn(s, RRE, ALCR, args[1], args[5]); | |
1923 | break; | |
1924 | case INDEX_op_sub2_i32: | |
1925 | if (const_args[4]) { | |
1926 | tcg_out_insn(s, RIL, SLFI, args[0], args[4]); | |
1927 | } else { | |
1928 | tcg_out_insn(s, RR, SLR, args[0], args[4]); | |
1929 | } | |
1930 | tcg_out_insn(s, RRE, SLBR, args[1], args[5]); | |
1931 | break; | |
1932 | ||
1933 | case INDEX_op_br: | |
1934 | tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0])); | |
1935 | break; | |
1936 | ||
1937 | case INDEX_op_brcond_i32: | |
1938 | tgen_brcond(s, TCG_TYPE_I32, args[2], args[0], | |
1939 | args[1], const_args[1], arg_label(args[3])); | |
1940 | break; | |
1941 | case INDEX_op_setcond_i32: | |
1942 | tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], | |
1943 | args[2], const_args[2]); | |
1944 | break; | |
1945 | case INDEX_op_movcond_i32: | |
1946 | tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], | |
1947 | args[2], const_args[2], args[3]); | |
1948 | break; | |
1949 | ||
1950 | case INDEX_op_qemu_ld_i32: | |
1951 | /* ??? Technically we can use a non-extending instruction. */ | |
1952 | case INDEX_op_qemu_ld_i64: | |
1953 | tcg_out_qemu_ld(s, args[0], args[1], args[2]); | |
1954 | break; | |
1955 | case INDEX_op_qemu_st_i32: | |
1956 | case INDEX_op_qemu_st_i64: | |
1957 | tcg_out_qemu_st(s, args[0], args[1], args[2]); | |
1958 | break; | |
1959 | ||
1960 | case INDEX_op_ld16s_i64: | |
1961 | tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]); | |
1962 | break; | |
1963 | case INDEX_op_ld32u_i64: | |
1964 | tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]); | |
1965 | break; | |
1966 | case INDEX_op_ld32s_i64: | |
1967 | tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]); | |
1968 | break; | |
1969 | case INDEX_op_ld_i64: | |
1970 | tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]); | |
1971 | break; | |
1972 | ||
1973 | case INDEX_op_st32_i64: | |
1974 | tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); | |
1975 | break; | |
1976 | case INDEX_op_st_i64: | |
1977 | tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]); | |
1978 | break; | |
1979 | ||
1980 | case INDEX_op_add_i64: | |
1981 | a0 = args[0], a1 = args[1], a2 = args[2]; | |
1982 | if (const_args[2]) { | |
1983 | do_addi_64: | |
1984 | if (a0 == a1) { | |
1985 | if (a2 == (int16_t)a2) { | |
1986 | tcg_out_insn(s, RI, AGHI, a0, a2); | |
1987 | break; | |
1988 | } | |
1989 | if (facilities & FACILITY_EXT_IMM) { | |
1990 | if (a2 == (int32_t)a2) { | |
1991 | tcg_out_insn(s, RIL, AGFI, a0, a2); | |
1992 | break; | |
1993 | } else if (a2 == (uint32_t)a2) { | |
1994 | tcg_out_insn(s, RIL, ALGFI, a0, a2); | |
1995 | break; | |
1996 | } else if (-a2 == (uint32_t)-a2) { | |
1997 | tcg_out_insn(s, RIL, SLGFI, a0, -a2); | |
1998 | break; | |
1999 | } | |
2000 | } | |
2001 | } | |
2002 | tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2); | |
2003 | } else if (a0 == a1) { | |
2004 | tcg_out_insn(s, RRE, AGR, a0, a2); | |
2005 | } else { | |
2006 | tcg_out_insn(s, RX, LA, a0, a1, a2, 0); | |
2007 | } | |
2008 | break; | |
2009 | case INDEX_op_sub_i64: | |
2010 | a0 = args[0], a1 = args[1], a2 = args[2]; | |
2011 | if (const_args[2]) { | |
2012 | a2 = -a2; | |
2013 | goto do_addi_64; | |
2014 | } else { | |
2015 | tcg_out_insn(s, RRE, SGR, args[0], args[2]); | |
2016 | } | |
2017 | break; | |
2018 | ||
2019 | case INDEX_op_and_i64: | |
2020 | if (const_args[2]) { | |
2021 | tgen_andi(s, TCG_TYPE_I64, args[0], args[2]); | |
2022 | } else { | |
2023 | tcg_out_insn(s, RRE, NGR, args[0], args[2]); | |
2024 | } | |
2025 | break; | |
2026 | case INDEX_op_or_i64: | |
2027 | if (const_args[2]) { | |
2028 | tgen64_ori(s, args[0], args[2]); | |
2029 | } else { | |
2030 | tcg_out_insn(s, RRE, OGR, args[0], args[2]); | |
2031 | } | |
2032 | break; | |
2033 | case INDEX_op_xor_i64: | |
2034 | if (const_args[2]) { | |
2035 | tgen64_xori(s, args[0], args[2]); | |
2036 | } else { | |
2037 | tcg_out_insn(s, RRE, XGR, args[0], args[2]); | |
2038 | } | |
2039 | break; | |
2040 | ||
2041 | case INDEX_op_neg_i64: | |
2042 | tcg_out_insn(s, RRE, LCGR, args[0], args[1]); | |
2043 | break; | |
2044 | case INDEX_op_bswap64_i64: | |
2045 | tcg_out_insn(s, RRE, LRVGR, args[0], args[1]); | |
2046 | break; | |
2047 | ||
2048 | case INDEX_op_mul_i64: | |
2049 | if (const_args[2]) { | |
2050 | if (args[2] == (int16_t)args[2]) { | |
2051 | tcg_out_insn(s, RI, MGHI, args[0], args[2]); | |
2052 | } else { | |
2053 | tcg_out_insn(s, RIL, MSGFI, args[0], args[2]); | |
2054 | } | |
2055 | } else { | |
2056 | tcg_out_insn(s, RRE, MSGR, args[0], args[2]); | |
2057 | } | |
2058 | break; | |
2059 | ||
2060 | case INDEX_op_div2_i64: | |
2061 | /* ??? We get an unnecessary sign-extension of the dividend | |
2062 | into R3 with this definition, but as we do in fact always | |
2063 | produce both quotient and remainder using INDEX_op_div_i64 | |
2064 | instead requires jumping through even more hoops. */ | |
2065 | tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]); | |
2066 | break; | |
2067 | case INDEX_op_divu2_i64: | |
2068 | tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]); | |
2069 | break; | |
2070 | case INDEX_op_mulu2_i64: | |
2071 | tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]); | |
2072 | break; | |
2073 | ||
2074 | case INDEX_op_shl_i64: | |
2075 | op = RSY_SLLG; | |
2076 | do_shift64: | |
2077 | if (const_args[2]) { | |
2078 | tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]); | |
2079 | } else { | |
2080 | tcg_out_sh64(s, op, args[0], args[1], args[2], 0); | |
2081 | } | |
2082 | break; | |
2083 | case INDEX_op_shr_i64: | |
2084 | op = RSY_SRLG; | |
2085 | goto do_shift64; | |
2086 | case INDEX_op_sar_i64: | |
2087 | op = RSY_SRAG; | |
2088 | goto do_shift64; | |
2089 | ||
2090 | case INDEX_op_rotl_i64: | |
2091 | if (const_args[2]) { | |
2092 | tcg_out_sh64(s, RSY_RLLG, args[0], args[1], | |
2093 | TCG_REG_NONE, args[2]); | |
2094 | } else { | |
2095 | tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0); | |
2096 | } | |
2097 | break; | |
2098 | case INDEX_op_rotr_i64: | |
2099 | if (const_args[2]) { | |
2100 | tcg_out_sh64(s, RSY_RLLG, args[0], args[1], | |
2101 | TCG_REG_NONE, (64 - args[2]) & 63); | |
2102 | } else { | |
2103 | /* We can use the smaller 32-bit negate because only the | |
2104 | low 6 bits are examined for the rotate. */ | |
2105 | tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]); | |
2106 | tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0); | |
2107 | } | |
2108 | break; | |
2109 | ||
2110 | case INDEX_op_ext8s_i64: | |
2111 | tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]); | |
2112 | break; | |
2113 | case INDEX_op_ext16s_i64: | |
2114 | tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]); | |
2115 | break; | |
2116 | case INDEX_op_ext_i32_i64: | |
2117 | case INDEX_op_ext32s_i64: | |
2118 | tgen_ext32s(s, args[0], args[1]); | |
2119 | break; | |
2120 | case INDEX_op_ext8u_i64: | |
2121 | tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]); | |
2122 | break; | |
2123 | case INDEX_op_ext16u_i64: | |
2124 | tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]); | |
2125 | break; | |
2126 | case INDEX_op_extu_i32_i64: | |
2127 | case INDEX_op_ext32u_i64: | |
2128 | tgen_ext32u(s, args[0], args[1]); | |
2129 | break; | |
2130 | ||
2131 | case INDEX_op_add2_i64: | |
2132 | if (const_args[4]) { | |
2133 | if ((int64_t)args[4] >= 0) { | |
2134 | tcg_out_insn(s, RIL, ALGFI, args[0], args[4]); | |
2135 | } else { | |
2136 | tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]); | |
2137 | } | |
2138 | } else { | |
2139 | tcg_out_insn(s, RRE, ALGR, args[0], args[4]); | |
2140 | } | |
2141 | tcg_out_insn(s, RRE, ALCGR, args[1], args[5]); | |
2142 | break; | |
2143 | case INDEX_op_sub2_i64: | |
2144 | if (const_args[4]) { | |
2145 | if ((int64_t)args[4] >= 0) { | |
2146 | tcg_out_insn(s, RIL, SLGFI, args[0], args[4]); | |
2147 | } else { | |
2148 | tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]); | |
2149 | } | |
2150 | } else { | |
2151 | tcg_out_insn(s, RRE, SLGR, args[0], args[4]); | |
2152 | } | |
2153 | tcg_out_insn(s, RRE, SLBGR, args[1], args[5]); | |
2154 | break; | |
2155 | ||
2156 | case INDEX_op_brcond_i64: | |
2157 | tgen_brcond(s, TCG_TYPE_I64, args[2], args[0], | |
2158 | args[1], const_args[1], arg_label(args[3])); | |
2159 | break; | |
2160 | case INDEX_op_setcond_i64: | |
2161 | tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], | |
2162 | args[2], const_args[2]); | |
2163 | break; | |
2164 | case INDEX_op_movcond_i64: | |
2165 | tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], | |
2166 | args[2], const_args[2], args[3]); | |
2167 | break; | |
2168 | ||
2169 | OP_32_64(deposit): | |
2170 | tgen_deposit(s, args[0], args[2], args[3], args[4]); | |
2171 | break; | |
2172 | ||
2173 | case INDEX_op_mb: | |
2174 | /* The host memory model is quite strong, we simply need to | |
2175 | serialize the instruction stream. */ | |
2176 | if (args[0] & TCG_MO_ST_LD) { | |
2177 | tcg_out_insn(s, RR, BCR, | |
2178 | facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0); | |
2179 | } | |
2180 | break; | |
2181 | ||
2182 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | |
2183 | case INDEX_op_mov_i64: | |
2184 | case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ | |
2185 | case INDEX_op_movi_i64: | |
2186 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | |
2187 | default: | |
2188 | tcg_abort(); | |
2189 | } | |
2190 | } | |
2191 | ||
/* Operand-constraint table for every TCG opcode this backend implements.
   Each entry pairs an opcode with one constraint string per operand
   (outputs first, then inputs), terminated by the { -1 } sentinel and
   consumed by tcg_add_target_add_op_defs() in tcg_target_init().
   NOTE(review): the meaning of the single-letter constraints ("r", "b",
   "a", "L", matching digits "0"/"1", and the immediate classes such as
   "i", "K", "O", "X", "A", "C", "R") is defined by this backend's
   constraint parser, which is outside this chunk -- confirm there.  */
static const TCGTargetOpDef s390_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "rK" } },

    /* The division pair constraints match the DR/DLR requirement that
       the implicit register pair be R2/R3 (see tcg_out_op).  */
    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },

    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "rO" } },
    { INDEX_op_xor_i32, { "r", "0", "rX" } },

    { INDEX_op_neg_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "0", "Ri" } },
    { INDEX_op_shr_i32, { "r", "0", "Ri" } },
    { INDEX_op_sar_i32, { "r", "0", "Ri" } },

    { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i32, { "r", "rC" } },
    { INDEX_op_setcond_i32, { "r", "r", "rC" } },
    { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i32, { "r", "0", "r" } },

    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L" } },

    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "ri" } },
    { INDEX_op_sub_i64, { "r", "0", "ri" } },
    { INDEX_op_mul_i64, { "r", "0", "rK" } },

    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },

    { INDEX_op_and_i64, { "r", "0", "ri" } },
    { INDEX_op_or_i64, { "r", "0", "rO" } },
    { INDEX_op_xor_i64, { "r", "0", "rX" } },

    { INDEX_op_neg_i64, { "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "Ri" } },
    { INDEX_op_shr_i64, { "r", "r", "Ri" } },
    { INDEX_op_sar_i64, { "r", "r", "Ri" } },

    { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_ext_i32_i64, { "r", "r" } },
    { INDEX_op_extu_i32_i64, { "r", "r" } },

    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i64, { "r", "rC" } },
    { INDEX_op_setcond_i64, { "r", "r", "rC" } },
    { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i64, { "r", "0", "r" } },

    { INDEX_op_mb, { } },
    { -1 },
};
2306 | ||
/* Populate the file-scope 'facilities' bitmask that gates optional
   instruction use throughout this backend (FACILITY_EXT_IMM,
   FACILITY_FAST_BCR_SER, ...).  The mask is filled in by executing
   STORE FACILITY LIST EXTENDED, but only when the kernel advertises
   that instruction via the AT_HWCAP auxv entry; otherwise
   'facilities' keeps its prior (zero) value and only the baseline
   instruction set is used.  */
static void query_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway.  */
    if (hwcap & HWCAP_S390_STFLE) {
        /* STFLE has fixed register operands: %r0 carries the doubleword
           count and %r1 the store address, hence the pinned locals.  */
        register int r0 __asm__("0");
        register void *r1 __asm__("1");

        /* stfle 0(%r1) */
        /* Raw opcode 0xb2b0 is emitted as data so the build does not
           depend on assembler support for the stfle mnemonic.  %r0 is
           seeded with 0 via the "0"(0) input constraint; presumably
           that requests a single doubleword of facility bits -- see
           the z/Architecture STFLE definition to confirm.  */
        r1 = &facilities;
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
    }
}
2323 | ||
2324 | static void tcg_target_init(TCGContext *s) | |
2325 | { | |
2326 | query_facilities(); | |
2327 | ||
2328 | tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff); | |
2329 | tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff); | |
2330 | ||
2331 | tcg_regset_clear(tcg_target_call_clobber_regs); | |
2332 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); | |
2333 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1); | |
2334 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2); | |
2335 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3); | |
2336 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4); | |
2337 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5); | |
2338 | /* The r6 register is technically call-saved, but it's also a parameter | |
2339 | register, so it can get killed by setup for the qemu_st helper. */ | |
2340 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6); | |
2341 | /* The return register can be considered call-clobbered. */ | |
2342 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); | |
2343 | ||
2344 | tcg_regset_clear(s->reserved_regs); | |
2345 | tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); | |
2346 | /* XXX many insns can't be used with R0, so we better avoid it for now */ | |
2347 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); | |
2348 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); | |
2349 | ||
2350 | tcg_add_target_add_op_defs(s390_op_defs); | |
2351 | } | |
2352 | ||
/* Size of the stack frame allocated by tcg_target_qemu_prologue:
   the target-defined stack offset plus the static call-argument area
   plus the CPU temporary buffer laid out by tcg_set_frame().
   NOTE(review): the individual macro values come from headers outside
   this chunk; the build-bug below only guarantees the total fits a
   two-byte uleb128.  */
#define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \
         + TCG_STATIC_CALL_ARGS_SIZE \
         + CPU_TEMP_BUF_NLONGS * sizeof(long)))
2356 | ||
/* Emit the prologue/epilogue pair that brackets all generated code.
   The prologue saves the call-saved registers, carves out FRAME_SIZE
   bytes of stack, copies the env pointer (first argument register)
   into TCG_AREG0, and branches to the translation block whose address
   arrives in the second argument register.  tb_ret_addr marks the
   start of the epilogue, which every TB branches back to; it restores
   the saved registers (implicitly popping the frame, since the saved
   %r15 is reloaded) and returns to the caller.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    /* Tell the register allocator where the temporary-spill buffer
       lives within the frame just allocated.  */
    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifndef CONFIG_SOFTMMU
    /* User-only: keep guest_base in a dedicated, reserved register.
       NOTE(review): the 0x80000 threshold presumably corresponds to
       bases small enough to fold into a 20-bit signed displacement
       instead -- confirm against the backend's addressing helpers.  */
    if (guest_base >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    /* Epilogue: the common exit path that exit_tb branches to.  */
    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}
2389 | ||
/* Layout of the DWARF call-frame data handed to tcg_register_jit_int:
   the shared CIE/FDE header followed by the FDE instruction bytes.  */
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];     /* DW_CFA_def_cfa + 2-byte uleb128 offset */
    uint8_t fde_reg_ofs[18];    /* nine DW_CFA_offset register/offset pairs */
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#define ELF_HOST_MACHINE EM_S390
2400 | ||
/* Unwind description for the frame built by tcg_target_qemu_prologue:
   the CFA is %r15 + FRAME_SIZE, and %r6..%r14 were stored by the
   prologue's stmg at 48(%r15).  (The DW_CFA_offset factors below are
   scaled by data_align.)  */
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 8,                /* sleb128 8 */
    .h.cie.return_column = TCG_REG_R14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
    }
};
2429 | ||
/* Register the generated-code buffer with GDB's JIT interface,
   attaching the static unwind info defined above so debuggers can
   walk through TCG-generated frames.  */
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}