/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode. */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)

/* Define some temporary registers. T3 is used for constant generation. */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_G2
#define TCG_REG_T3  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)
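
/*
 * For illustration: a format-3 instruction is assembled by OR-ing the field
 * macros above into one 32-bit word.  For example, "add %o1, %o2, %o0"
 * (rd = %o0 = 8, rs1 = %o1 = 9, rs2 = %o2 = 10) is
 *
 *     ARITH_ADD | INSN_RD(8) | INSN_RS1(9) | INSN_RS2(10)
 *       = (2 << 30) | (8 << 25) | (9 << 14) | 10
 *       = 0x9002400a
 *
 * (ARITH_ADD is defined below.)  Immediate forms instead set bit 13 via the
 * INSN_IMM* macros, which also truncate the operand to the field width.
 */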

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD   (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND   (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN  (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR    (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC  (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN   (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR   (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB   (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC  (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL  (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL  (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV  (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV  (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX  (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR  (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC   (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64
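
/*
 * For illustration: check_fit_i64(val, bits) tests whether VAL round-trips
 * through a BITS-wide signed field.  A 13-bit immediate thus spans
 * -4096..4095: check_fit_tl(4095, 13) holds, check_fit_tl(4096, 13) does not.
 */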

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}
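
/*
 * For illustration: a branch whose target lies 0x100 bytes ahead and is
 * patched via R_SPARC_WDISP19 stores the word displacement 0x100 >> 2 = 0x40
 * in the low 19 bits of the insn; INSN_OFF19(-1) computes the all-ones field
 * mask that is cleared before the new offset is OR-ed in.
 */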

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}
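
/*
 * For illustration: SETHI installs bits 31..10 of its immediate and zeroes
 * everything else, including the upper 32 bits of the 64-bit register.
 * tcg_out_sethi(s, ret, 0x12345678) thus leaves ret == 0x12345400, and the
 * low 10 bits (0x278 here) must be supplied by a following OR or XOR.
 */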

/* A 13-bit constant sign-extended to 64 bits. */
static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

/* A 32-bit constant sign-extended to 64 bits. */
static void tcg_out_movi_s32(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_sethi(s, ret, ~arg);
    tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
}
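
/*
 * For illustration: as used in this file, tcg_out_movi_s32 only ever sees a
 * negative ARG (non-negative values are routed to tcg_out_movi_u32).  SETHI
 * of ~ARG zero-extends, while the 13-bit XOR immediate sign-extends, so bits
 * 63..10 of the immediate are all ones: the XOR complements those bits back
 * to ARG's sign extension and installs ARG's low 10 bits directly.
 */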

/* A 32-bit constant zero-extended to 64 bits. */
static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out_sethi(s, ret, arg);
    if (arg & 0x3ff) {
        tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    }
}

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 13-bit constant sign-extended to 64-bits. */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_s13(s, ret, arg);
        return;
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_u32(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB. */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits. */
    if (arg == lo) {
        tcg_out_movi_s32(s, ret, arg);
        return;
    }

    /* A 32-bit constant, shifted. */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces. */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_movi_u32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T3);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T3);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
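
/*
 * For illustration: the 32-bit UDIV/SDIV instructions divide the 64-bit
 * value formed by Y (high half) and the low 32 bits of RS1 by the 32-bit
 * divisor, so preloading Y with the zero/sign extension of RS1 above
 * reduces this to an ordinary 32-bit division.
 */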

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well. */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit. */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero. */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register. */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}
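
/*
 * For illustration: after the final SUBCC above, the icc carry bit is set
 * exactly when C1 < C2 unsigned.  ADDC then computes %g0 + 0 + C, i.e. the
 * carry itself, yielding 1 for LTU; SUBC computes %g0 - (-1) - C = 1 - C,
 * yielding 1 for GEU.  Either way the boolean lands in RET without a branch.
 */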

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output. */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates. */
        if (bhconst && bh != 0) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place. */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand. */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue, TCG_REG_T2);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}
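
/*
 * For illustration: both encodings use one bit per ordering pair and the
 * values coincide, e.g. TCG_MO_LD_LD (0x01) is MEMBAR #LoadLoad and
 * TCG_MO_ST_ST (0x08) is MEMBAR #StoreStore, so the mmask field can be
 * taken from A0 unchanged.
 */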

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required. */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr. */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_s13(s, TCG_REG_O0, 0);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_T1 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);
    MemOp sgn;

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    /* Use inline tcg_out_ext32s; otherwise let the helper sign-extend. */
    sgn = (opc & MO_SIZE) < MO_32 ? MO_SIGN : 0;

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_SIZE | sgn)], NULL);
    tcg_out_ld_helper_ret(s, lb, sgn, &ldst_helper_param);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE], NULL);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

/* We expect to use a 13-bit negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 12)

/*
 * For softmmu, perform the TLB load and compare.
 * For useronly, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    unsigned a_mask;

    /* We don't support unaligned accesses. */
    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    h->aa.align = MAX(h->aa.align, s_bits);
    a_mask = (1u << h->aa.align) - 1;

#ifdef CONFIG_SOFTMMU
    int mem_index = get_mmuidx(oi);
    int fast_off = tlb_mask_table_ofs(s, mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                        : offsetof(CPUTLBEntry, addr_write);
    int add_off = offsetof(CPUTLBEntry, addend);
    int compare_mask;
    int cc;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T2, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T3, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index. */
    tcg_out_arithi(s, TCG_REG_T1, addr_reg,
                   s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address in T1. */
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD);

    /*
     * Load the tlb comparator and the addend.
     * Always load the entire 64-bit comparator for simplicity.
     * We will ignore the high bits via BPCC_ICC below.
     */
    tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_T2, TCG_REG_T1, cmp_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off);
    h->base = TCG_REG_T1;

    /* Mask out the page offset, except for the required alignment. */
    compare_mask = s->page_mask | a_mask;
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi_s32(s, TCG_REG_T3, compare_mask);
        tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND);
    }
    tcg_out_cmp(s, TCG_REG_T2, TCG_REG_T3, 0);

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addrlo_reg = addr_reg;
    ldst->label_ptr[0] = s->code_ptr;

    /* bne,pn %[xi]cc, label0 */
    cc = addr_type == TCG_TYPE_I32 ? BPCC_ICC : BPCC_XCC;
    tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0);
#else
    /*
     * If the size equals the required alignment, we can skip the test
     * and allow host SIGBUS to deliver SIGBUS to the guest.
     * Otherwise, test for at least natural alignment and defer
     * everything else to the helper functions.
     */
    if (s_bits != get_alignment_bits(opc)) {
        tcg_debug_assert(check_fit_tl(a_mask, 13));
        tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;
        ldst->label_ptr[0] = s->code_ptr;

        /* bne,pn %icc, label0 */
        tcg_out_bpcc0(s, COND_NE, BPCC_PN | BPCC_ICC, 0);
    }
    h->base = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0;
#endif

    /* If the guest address must be zero-extended, do in the delay slot. */
    if (addr_type == TCG_TYPE_I32) {
        tcg_out_ext32u(s, TCG_REG_T2, addr_reg);
        h->index = TCG_REG_T2;
    } else {
        if (ldst) {
            tcg_out_nop(s);
        }
        h->index = addr_reg;
    }
    return ldst;
}
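
/*
 * For illustration, the softmmu fast path above emits roughly the
 * following (offsets and the final extension depend on configuration):
 *
 *     ldx   [%env + mask_off], %t2       ! TLB mask
 *     ldx   [%env + table_off], %t3      ! TLB table
 *     srl   %addr, page_bits - CPU_TLB_ENTRY_BITS, %t1
 *     and   %t1, %t2, %t1                ! index into the table
 *     add   %t1, %t3, %t1                ! &tlb_table[mmu_idx][index]
 *     ldx   [%t1 + cmp_off], %t2         ! comparator
 *     ldx   [%t1 + add_off], %t1         ! addend -> h->base
 *     and   %addr, compare_mask, %t3
 *     subcc %t2, %t3, %g0
 *     bne,pn %xcc, slow_path
 *      srl  %addr, 0, %t2                ! delay slot: zero-extend if needed
 */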

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
        [MO_UB] = LDUB,
        [MO_SB] = LDSB,
        [MO_UB | MO_LE] = LDUB,
        [MO_SB | MO_LE] = LDSB,

        [MO_BEUW] = LDUH,
        [MO_BESW] = LDSH,
        [MO_BEUL] = LDUW,
        [MO_BESL] = LDSW,
        [MO_BEUQ] = LDX,
        [MO_BESQ] = LDX,

        [MO_LEUW] = LDUH_LE,
        [MO_LESW] = LDSH_LE,
        [MO_LEUL] = LDUW_LE,
        [MO_LESL] = LDSW_LE,
        [MO_LEUQ] = LDX_LE,
        [MO_LESQ] = LDX_LE,
    };

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, true);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
        [MO_UB] = STB,

        [MO_BEUW] = STH,
        [MO_BEUL] = STW,
        [MO_BEUQ] = STX,

        [MO_LEUW] = STH_LE,
        [MO_LEUL] = STW_LE,
        [MO_LEUQ] = STX_LE,
    };

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, false);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_s13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1. */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Load link and indirect branch. */
    set_jmp_insn_offset(s, which);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments. */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        tcg_out_mov_delay(s, TCG_REG_TB, a0);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result. */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        break;

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T3); /* for internal use */
}

#define ELF_HOST_MACHINE  EM_SPARCV9

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 30,                           /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,                 /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },        /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}